-rw-r--r--  .mailmap | 1
-rw-r--r--  CREDITS | 9
-rw-r--r--  Documentation/ABI/stable/sysfs-devices | 2
-rw-r--r--  Documentation/ABI/testing/evm | 48
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-mmc | 4
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 6
-rw-r--r--  Documentation/ABI/testing/sysfs-power | 6
-rw-r--r--  Documentation/Makefile | 6
-rw-r--r--  Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html | 2
-rw-r--r--  Documentation/admin-guide/README.rst | 2
-rw-r--r--  Documentation/admin-guide/bug-hunting.rst | 8
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 28
-rw-r--r--  Documentation/admin-guide/reporting-bugs.rst | 4
-rw-r--r--  Documentation/cdrom/ide-cd | 6
-rw-r--r--  Documentation/core-api/kernel-api.rst | 55
-rw-r--r--  Documentation/dev-tools/coccinelle.rst | 2
-rw-r--r--  Documentation/dev-tools/kselftest.rst | 34
-rw-r--r--  Documentation/devicetree/bindings/hwmon/gpio-fan.txt (renamed from Documentation/devicetree/bindings/gpio/gpio-fan.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/hwmon/max1619.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/hwmon/max31785.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/mmc/mtk-sd.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-msm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-omap.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/mmc/tmio_mmc.txt | 70
-rw-r--r--  Documentation/devicetree/bindings/regulator/da9211.txt | 82
-rw-r--r--  Documentation/devicetree/bindings/regulator/pfuze100.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-davinci.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-rspi.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-sprd-adi.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/trivial-devices.txt | 1
-rw-r--r--  Documentation/dmaengine/00-INDEX | 8
-rw-r--r--  Documentation/dmaengine/client.txt | 222
-rw-r--r--  Documentation/dmaengine/provider.txt | 424
-rw-r--r--  Documentation/dmaengine/pxa_dma.txt | 153
-rw-r--r--  Documentation/doc-guide/kernel-doc.rst | 2
-rw-r--r--  Documentation/driver-api/dmaengine/client.rst | 275
-rw-r--r--  Documentation/driver-api/dmaengine/dmatest.rst (renamed from Documentation/dmaengine/dmatest.txt) | 96
-rw-r--r--  Documentation/driver-api/dmaengine/index.rst | 55
-rw-r--r--  Documentation/driver-api/dmaengine/provider.rst | 508
-rw-r--r--  Documentation/driver-api/dmaengine/pxa_dma.rst | 190
-rw-r--r--  Documentation/driver-api/index.rst | 1
-rw-r--r--  Documentation/driver-api/usb/usb.rst | 4
-rw-r--r--  Documentation/fb/fbcon.txt | 4
-rw-r--r--  Documentation/features/debug/KASAN/arch-support.txt | 2
-rw-r--r--  Documentation/filesystems/dnotify.txt | 2
-rw-r--r--  Documentation/filesystems/ext4.txt | 8
-rw-r--r--  Documentation/hid/hiddev.txt | 2
-rw-r--r--  Documentation/hwmon/max31785 | 51
-rw-r--r--  Documentation/hwmon/sht15 | 3
-rw-r--r--  Documentation/input/devices/xpad.rst | 3
-rw-r--r--  Documentation/laptops/laptop-mode.txt | 6
-rw-r--r--  Documentation/locking/rt-mutex-design.txt | 2
-rw-r--r--  Documentation/media/dvb-drivers/bt8xx.rst | 8
-rw-r--r--  Documentation/media/uapi/v4l/dev-sliced-vbi.rst | 2
-rw-r--r--  Documentation/media/uapi/v4l/extended-controls.rst | 2
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-reserved.rst | 2
-rw-r--r--  Documentation/media/v4l-drivers/bttv.rst | 2
-rw-r--r--  Documentation/media/v4l-drivers/max2175.rst | 2
-rw-r--r--  Documentation/networking/cdc_mbim.txt | 4
-rw-r--r--  Documentation/networking/checksum-offloads.txt | 2
-rw-r--r--  Documentation/networking/packet_mmap.txt | 2
-rw-r--r--  Documentation/pi-futex.txt | 2
-rw-r--r--  Documentation/power/interface.txt | 5
-rw-r--r--  Documentation/power/pci.txt | 10
-rw-r--r--  Documentation/power/runtime_pm.txt | 2
-rw-r--r--  Documentation/process/3.Early-stage.rst | 2
-rw-r--r--  Documentation/process/4.Coding.rst | 2
-rw-r--r--  Documentation/process/index.rst | 1
-rw-r--r--  Documentation/process/kernel-driver-statement.rst | 199
-rw-r--r--  Documentation/process/submitting-drivers.rst | 2
-rw-r--r--  Documentation/process/submitting-patches.rst | 8
-rw-r--r--  Documentation/security/LSM.rst | 2
-rw-r--r--  Documentation/security/credentials.rst | 2
-rw-r--r--  Documentation/security/keys/request-key.rst | 2
-rw-r--r--  Documentation/sound/cards/joystick.rst | 2
-rw-r--r--  Documentation/sound/hd-audio/notes.rst | 2
-rw-r--r--  Documentation/sound/kernel-api/writing-an-alsa-driver.rst | 2
-rw-r--r--  Documentation/sysctl/README | 2
-rw-r--r--  Documentation/sysctl/fs.txt | 2
-rw-r--r--  Documentation/timers/highres.txt | 4
-rw-r--r--  Documentation/trace/ftrace-uses.rst | 293
-rw-r--r--  Documentation/trace/intel_th.txt | 2
-rw-r--r--  Documentation/usb/gadget-testing.txt | 2
-rw-r--r--  Documentation/watchdog/hpwdt.txt | 2
-rw-r--r--  Documentation/watchdog/pcwd-watchdog.txt | 2
-rw-r--r--  MAINTAINERS | 59
-rw-r--r--  Makefile | 5
-rw-r--r--  arch/arm/kernel/traps.c | 28
-rw-r--r--  arch/arm/mach-pxa/stargate2.c | 17
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi | 12
-rw-r--r--  arch/mips/ar7/platform.c | 5
-rw-r--r--  arch/mips/ar7/prom.c | 2
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 10
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 29
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S | 12
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 12
-rw-r--r--  arch/x86/include/asm/elf.h | 2
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c | 11
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 4
-rw-r--r--  arch/x86/kernel/idt.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 11
-rw-r--r--  arch/x86/kernel/traps.c | 10
-rw-r--r--  arch/x86/kernel/tsc.c | 8
-rw-r--r--  arch/x86/kernel/unwind_orc.c | 2
-rw-r--r--  arch/x86/mm/mem_encrypt.c | 2
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 4
-rw-r--r--  crypto/ccm.c | 4
-rw-r--r--  drivers/acpi/sleep.c | 28
-rw-r--r--  drivers/base/regmap/Kconfig | 4
-rw-r--r--  drivers/base/regmap/internal.h | 2
-rw-r--r--  drivers/base/regmap/regmap-spi.c | 2
-rw-r--r--  drivers/base/regmap/regmap-spmi.c | 4
-rw-r--r--  drivers/base/regmap/regmap.c | 111
-rw-r--r--  drivers/block/rbd.c | 4
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 6
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 87
-rw-r--r--  drivers/char/tpm/tpm.h | 15
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 73
-rw-r--r--  drivers/char/tpm/tpm2-space.c | 4
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 59
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 5
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 6
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 4
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.c | 73
-rw-r--r--  drivers/edac/amd64_edac.c | 5
-rw-r--r--  drivers/edac/edac_mc.c | 7
-rw-r--r--  drivers/edac/edac_mc.h | 8
-rw-r--r--  drivers/edac/ghes_edac.c | 137
-rw-r--r--  drivers/edac/i7core_edac.c | 11
-rw-r--r--  drivers/edac/pnd2_edac.c | 9
-rw-r--r--  drivers/edac/sb_edac.c | 43
-rw-r--r--  drivers/edac/skx_edac.c | 30
-rw-r--r--  drivers/edac/thunderx_edac.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 15
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/asc7621.c | 1
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c | 8
-rw-r--r--  drivers/hwmon/gpio-fan.c | 224
-rw-r--r--  drivers/hwmon/k10temp.c | 108
-rw-r--r--  drivers/hwmon/max1619.c | 10
-rw-r--r--  drivers/hwmon/max6621.c | 593
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 10
-rw-r--r--  drivers/hwmon/pmbus/Makefile | 1
-rw-r--r--  drivers/hwmon/pmbus/max31785.c | 116
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 6
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 25
-rw-r--r--  drivers/hwmon/sht15.c | 175
-rw-r--r--  drivers/hwmon/stts751.c | 18
-rw-r--r--  drivers/hwmon/w83793.c | 4
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 39
-rw-r--r--  drivers/ide/ide-cd.c | 7
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_smbus.c | 4
-rw-r--r--  drivers/input/touchscreen/tsc200x-core.c | 1
-rw-r--r--  drivers/mmc/core/block.c | 346
-rw-r--r--  drivers/mmc/core/bus.c | 7
-rw-r--r--  drivers/mmc/core/core.c | 262
-rw-r--r--  drivers/mmc/core/core.h | 16
-rw-r--r--  drivers/mmc/core/host.c | 20
-rw-r--r--  drivers/mmc/core/host.h | 7
-rw-r--r--  drivers/mmc/core/mmc.c | 46
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 6
-rw-r--r--  drivers/mmc/core/queue.c | 41
-rw-r--r--  drivers/mmc/core/queue.h | 4
-rw-r--r--  drivers/mmc/core/sd.c | 51
-rw-r--r--  drivers/mmc/core/sdio_irq.c | 3
-rw-r--r--  drivers/mmc/host/Kconfig | 28
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 13
-rw-r--r--  drivers/mmc/host/cavium.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc-k3.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 84
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 3
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 7
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 2
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c | 768
-rw-r--r--  drivers/mmc/host/mmci.c | 2
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 285
-rw-r--r--  drivers/mmc/host/mvsdio.c | 6
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 11
-rw-r--r--  drivers/mmc/host/omap.c | 20
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 35
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 5
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 38
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 174
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c | 28
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 326
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 58
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 607
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 11
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c | 35
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.h | 73
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 13
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 18
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 10
-rw-r--r--  drivers/mmc/host/sdhci.c | 15
-rw-r--r--  drivers/mmc/host/sdhci_f_sdh30.c | 14
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 5
-rw-r--r--  drivers/mmc/host/tifm_sd.c | 6
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 36
-rw-r--r--  drivers/mmc/host/usdhi6rol0.c | 2
-rw-r--r--  drivers/mmc/host/via-sdmmc.c | 8
-rw-r--r--  drivers/mmc/host/vub300.c | 41
-rw-r--r--  drivers/mmc/host/wbsd.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 3
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 1
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 1
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 6
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 14
-rw-r--r--  drivers/net/can/sun4i_can.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 7
-rw-r--r--  drivers/net/usb/asix_devices.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/regulator/Kconfig | 2
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 92
-rw-r--r--  drivers/regulator/da9211-regulator.c | 14
-rw-r--r--  drivers/regulator/da9211-regulator.h | 2
-rw-r--r--  drivers/regulator/pbias-regulator.c | 21
-rw-r--r--  drivers/regulator/qcom_spmi-regulator.c | 48
-rw-r--r--  drivers/regulator/tps65218-regulator.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 5
-rw-r--r--  drivers/spi/Kconfig | 13
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/spi-armada-3700.c | 17
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c | 4
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 66
-rw-r--r--  drivers/spi/spi-imx.c | 256
-rw-r--r--  drivers/spi/spi-mxs.c | 120
-rw-r--r--  drivers/spi/spi-orion.c | 1
-rw-r--r--  drivers/spi/spi-rspi.c | 6
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 3
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 12
-rw-r--r--  drivers/spi/spi-sprd-adi.c | 418
-rw-r--r--  drivers/spi/spi-tegra114.c | 6
-rw-r--r--  drivers/spi/spi.c | 9
-rw-r--r--  fs/file_table.c | 2
-rw-r--r--  include/asm-generic/div64.h | 14
-rw-r--r--  include/linux/bitmap.h | 114
-rw-r--r--  include/linux/fs.h | 1
-rw-r--r--  include/linux/gpio-fan.h | 36
-rw-r--r--  include/linux/kallsyms.h | 14
-rw-r--r--  include/linux/log2.h | 42
-rw-r--r--  include/linux/math64.h | 27
-rw-r--r--  include/linux/mfd/axp20x.h | 3
-rw-r--r--  include/linux/mfd/rtsx_pci.h | 1
-rw-r--r--  include/linux/mfd/tps65218.h | 19
-rw-r--r--  include/linux/mmc/host.h | 11
-rw-r--r--  include/linux/mmc/sdhci-pci-data.h | 3
-rw-r--r--  include/linux/module.h | 7
-rw-r--r--  include/linux/platform_data/sht15.h | 38
-rw-r--r--  include/linux/printk.h | 3
-rw-r--r--  include/linux/regmap.h | 64
-rw-r--r--  include/linux/regulator/da9211.h | 5
-rw-r--r--  include/linux/skbuff.h | 7
-rw-r--r--  include/linux/spi/spi-fsl-dspi.h | 31
-rw-r--r--  include/linux/sysctl.h | 5
-rw-r--r--  include/net/act_api.h | 4
-rw-r--r--  include/net/pkt_cls.h | 24
-rw-r--r--  include/sound/seq_kernel.h | 3
-rw-r--r--  include/sound/timer.h | 2
-rw-r--r--  include/uapi/drm/i915_drm.h | 1
-rw-r--r--  include/uapi/linux/xattr.h | 3
-rw-r--r--  kernel/kallsyms.c | 43
-rw-r--r--  kernel/module.c | 32
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 6
-rw-r--r--  kernel/workqueue_internal.h | 3
-rw-r--r--  lib/asn1_decoder.c | 4
-rw-r--r--  lib/bitmap.c | 4
-rw-r--r--  lib/crc32.c | 2
-rw-r--r--  lib/crc4.c | 2
-rw-r--r--  lib/crc8.c | 22
-rw-r--r--  lib/div64.c | 6
-rw-r--r--  lib/gcd.c | 6
-rw-r--r--  net/8021q/vlan.c | 6
-rw-r--r--  net/core/skbuff.c | 1
-rw-r--r--  net/dsa/switch.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 5
-rw-r--r--  net/ipv4/tcp_offload.c | 12
-rw-r--r--  net/l2tp/l2tp_ip.c | 24
-rw-r--r--  net/l2tp/l2tp_ip6.c | 24
-rw-r--r--  net/qrtr/qrtr.c | 2
-rw-r--r--  net/rds/ib_recv.c | 10
-rw-r--r--  net/sched/act_api.c | 2
-rw-r--r--  net/sched/act_bpf.c | 2
-rw-r--r--  net/sched/act_connmark.c | 2
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_gact.c | 2
-rw-r--r--  net/sched/act_ife.c | 2
-rw-r--r--  net/sched/act_ipt.c | 4
-rw-r--r--  net/sched/act_mirred.c | 2
-rw-r--r--  net/sched/act_nat.c | 2
-rw-r--r--  net/sched/act_pedit.c | 2
-rw-r--r--  net/sched/act_police.c | 2
-rw-r--r--  net/sched/act_sample.c | 2
-rw-r--r--  net/sched/act_simple.c | 2
-rw-r--r--  net/sched/act_skbedit.c | 2
-rw-r--r--  net/sched/act_skbmod.c | 2
-rw-r--r--  net/sched/act_tunnel_key.c | 2
-rw-r--r--  net/sched/act_vlan.c | 2
-rw-r--r--  net/sched/cls_api.c | 1
-rw-r--r--  net/sched/cls_basic.c | 20
-rw-r--r--  net/sched/cls_bpf.c | 7
-rw-r--r--  net/sched/cls_cgroup.c | 24
-rw-r--r--  net/sched/cls_flow.c | 24
-rw-r--r--  net/sched/cls_flower.c | 16
-rw-r--r--  net/sched/cls_fw.c | 17
-rw-r--r--  net/sched/cls_matchall.c | 15
-rw-r--r--  net/sched/cls_route.c | 17
-rw-r--r--  net/sched/cls_rsvp.h | 15
-rw-r--r--  net/sched/cls_tcindex.c | 33
-rw-r--r--  net/sched/cls_u32.c | 8
-rw-r--r--  net/xfrm/xfrm_input.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 71
-rw-r--r--  samples/connector/cn_test.c | 6
-rwxr-xr-x  scripts/documentation-file-ref-check | 15
-rwxr-xr-x  scripts/find-unused-docs.sh | 62
-rwxr-xr-x  scripts/kernel-doc | 17
-rwxr-xr-x  scripts/leaking_addresses.pl | 305
-rw-r--r--  scripts/mod/modpost.c | 2
-rw-r--r--  security/apparmor/ipc.c | 4
-rw-r--r--  security/commoncap.c | 193
-rw-r--r--  security/integrity/digsig.c | 14
-rw-r--r--  security/integrity/evm/evm.h | 3
-rw-r--r--  security/integrity/evm/evm_crypto.c | 2
-rw-r--r--  security/integrity/evm/evm_main.c | 3
-rw-r--r--  security/integrity/evm/evm_secfs.c | 29
-rw-r--r--  security/integrity/iint.c | 49
-rw-r--r--  security/integrity/ima/ima_api.c | 67
-rw-r--r--  security/integrity/ima/ima_appraise.c | 4
-rw-r--r--  security/integrity/ima/ima_crypto.c | 10
-rw-r--r--  security/integrity/ima/ima_fs.c | 6
-rw-r--r--  security/integrity/ima/ima_main.c | 23
-rw-r--r--  security/integrity/ima/ima_policy.c | 6
-rw-r--r--  security/integrity/integrity.h | 2
-rw-r--r--  security/smack/smack_lsm.c | 79
-rw-r--r--  security/tomoyo/audit.c | 2
-rw-r--r--  security/tomoyo/common.c | 4
-rw-r--r--  security/tomoyo/common.h | 2
-rw-r--r--  security/tomoyo/util.c | 39
-rw-r--r--  sound/core/hrtimer.c | 1
-rw-r--r--  sound/core/seq/oss/seq_oss_midi.c | 4
-rw-r--r--  sound/core/seq/oss/seq_oss_readq.c | 29
-rw-r--r--  sound/core/seq/oss/seq_oss_readq.h | 2
-rw-r--r--  sound/core/timer.c | 67
-rw-r--r--  sound/pci/hda/patch_realtek.c | 5
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  tools/include/uapi/drm/i915_drm.h | 1
-rw-r--r--  tools/perf/builtin-trace.c | 10
-rw-r--r--  tools/perf/util/parse-events.l | 5
371 files changed, 9477 insertions, 3070 deletions
diff --git a/.mailmap b/.mailmap
index 4757d361fd33..c021f29779a7 100644
--- a/.mailmap
+++ b/.mailmap
@@ -102,6 +102,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
+Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
diff --git a/CREDITS b/CREDITS
index 9fbd2c77b546..a3ec0c744172 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2113,6 +2113,10 @@ S: J. Obrechtstr 23
S: NL-5216 GP 's-Hertogenbosch
S: The Netherlands
+N: Ashley Lai
+E: ashleydlai@gmail.com
+D: IBM VTPM driver
+
N: Savio Lam
E: lam836@cs.cuhk.hk
D: Author of the dialog utility, foundation
@@ -3333,6 +3337,10 @@ S: Braunschweiger Strasse 79
S: 31134 Hildesheim
S: Germany
+N: Marcel Selhorst
+E: tpmdd@selhorst.net
+D: TPM driver
+
N: Darren Senn
E: sinster@darkwater.com
D: Whatever I notice needs doing (so far: itimers, /proc)
@@ -4128,7 +4136,6 @@ D: MD driver
D: EISA/sysfs subsystem
S: France
-
# Don't add your name here, unless you really _are_ after Marc
# alphabetically. Leonard used to be very proud of being the
# last entry, and he'll get positively pissed if he can't even
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index 35c457f8ce73..4404bd9b96c1 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -1,5 +1,5 @@
# Note: This documents additional properties of any device beyond what
-# is documented in Documentation/sysfs-rules.txt
+# is documented in Documentation/admin-guide/sysfs-rules.rst
What: /sys/devices/*/of_node
Date: February 2015
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index 8374d4557e5d..9578247e1792 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -7,17 +7,37 @@ Description:
HMAC-sha1 value across the extended attributes, storing the
value as the extended attribute 'security.evm'.
- EVM depends on the Kernel Key Retention System to provide it
- with a trusted/encrypted key for the HMAC-sha1 operation.
- The key is loaded onto the root's keyring using keyctl. Until
- EVM receives notification that the key has been successfully
- loaded onto the keyring (echo 1 > <securityfs>/evm), EVM
- can not create or validate the 'security.evm' xattr, but
- returns INTEGRITY_UNKNOWN. Loading the key and signaling EVM
- should be done as early as possible. Normally this is done
- in the initramfs, which has already been measured as part
- of the trusted boot. For more information on creating and
- loading existing trusted/encrypted keys, refer to:
- Documentation/keys-trusted-encrypted.txt. (A sample dracut
- patch, which loads the trusted/encrypted key and enables
- EVM, is available from http://linux-ima.sourceforge.net/#EVM.)
+ EVM supports two classes of security.evm. The first is
+ an HMAC-sha1 generated locally with a
+ trusted/encrypted key stored in the Kernel Key
+ Retention System. The second is a digital signature
+ generated either locally or remotely using an
+ asymmetric key. These keys are loaded onto root's
+ keyring using keyctl, and EVM is then enabled by
+ echoing a value to <securityfs>/evm:
+
+ 1: enable HMAC validation and creation
+ 2: enable digital signature validation
+ 3: enable HMAC and digital signature validation and HMAC
+ creation
+
+ Further writes will be blocked if HMAC support is enabled or
+ if bit 32 is set:
+
+ echo 0x80000002 ><securityfs>/evm
+
+ will enable digital signature validation and block
+ further writes to <securityfs>/evm.
+
+ Until this is done, EVM can not create or validate the
+ 'security.evm' xattr, but returns INTEGRITY_UNKNOWN.
+ Loading keys and signaling EVM should be done as early
+ as possible. Normally this is done in the initramfs,
+ which has already been measured as part of the trusted
+ boot. For more information on creating and loading
+ existing trusted/encrypted keys, refer to:
+
+ Documentation/security/keys/trusted-encrypted.rst. Both dracut
+ (via 97masterkey and 98integrity) and systemd (via
+ core/ima-setup) have support for loading keys at boot
+ time.
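
As a rough userspace illustration of the interface documented above, the sketch
below writes the documented value 0x80000002 to the evm attribute. It assumes
securityfs is mounted at the conventional /sys/kernel/security, and it is only
an example of the documented usage, not part of this patch. It would be run as
root after the verification keys have been loaded onto root's keyring, as
described in the ABI entry.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Assumes securityfs is mounted at /sys/kernel/security. */
    #define EVM_PATH "/sys/kernel/security/evm"

    int main(void)
    {
            /*
             * 0x80000002: enable digital signature validation and block
             * further writes to <securityfs>/evm, as documented above.
             */
            const char *val = "0x80000002";
            int fd = open(EVM_PATH, O_WRONLY);

            if (fd < 0) {
                    perror("open " EVM_PATH);
                    return 1;
            }
            if (write(fd, val, strlen(val)) < 0) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }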
diff --git a/Documentation/ABI/testing/sysfs-bus-mmc b/Documentation/ABI/testing/sysfs-bus-mmc
new file mode 100644
index 000000000000..519f028d19cc
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-mmc
@@ -0,0 +1,4 @@
+What: /sys/bus/mmc/devices/.../rev
+Date: October 2017
+Contact: Jin Qian <jinqian@android.com>
+Description: Extended CSD revision number
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index f3d5817c4ef0..d6d862db3b5d 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -187,7 +187,8 @@ Description: Processor frequency boosting control
This switch controls the boost setting for the whole system.
Boosting allows the CPU and the firmware to run at a frequency
beyound it's nominal limit.
- More details can be found in Documentation/cpu-freq/boost.txt
+ More details can be found in
+ Documentation/admin-guide/pm/cpufreq.rst
What: /sys/devices/system/cpu/cpu#/crash_notes
@@ -223,7 +224,8 @@ Description: Parameters for the Intel P-state driver
no_turbo: limits the driver to selecting P states below the turbo
frequency range.
- More details can be found in Documentation/cpu-freq/intel-pstate.txt
+ More details can be found in
+ Documentation/admin-guide/pm/intel_pstate.rst
What: /sys/devices/system/cpu/cpu*/cache/index*/<set_of_attributes_mentioned_below>
Date: July 2014(documented, existed before August 2008)
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index a1d1612f3651..1e0d1dac706b 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -18,7 +18,8 @@ Description:
Writing one of the above strings to this file causes the system
to transition into the corresponding state, if available.
- See Documentation/power/states.txt for more information.
+ See Documentation/admin-guide/pm/sleep-states.rst for more
+ information.
What: /sys/power/mem_sleep
Date: November 2016
@@ -35,7 +36,8 @@ Description:
represented by it to be used on subsequent attempts to suspend
the system.
- See Documentation/power/states.txt for more information.
+ See Documentation/admin-guide/pm/sleep-states.rst for more
+ information.
What: /sys/power/disk
Date: September 2006
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 85f7856f0092..2ca77ad0f238 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -97,6 +97,9 @@ endif # HAVE_SPHINX
# The following targets are independent of HAVE_SPHINX, and the rules should
# work or silently pass without Sphinx.
+refcheckdocs:
+ $(Q)cd $(srctree);scripts/documentation-file-ref-check
+
cleandocs:
$(Q)rm -rf $(BUILDDIR)
$(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media clean
@@ -109,6 +112,7 @@ dochelp:
@echo ' epubdocs - EPUB'
@echo ' xmldocs - XML'
@echo ' linkcheckdocs - check for broken external links (will connect to external hosts)'
+ @echo ' refcheckdocs - check for references to non-existing files under Documentation'
@echo ' cleandocs - clean all generated files'
@echo
@echo ' make SPHINXDIRS="s1 s2" [target] Generate only docs of folder s1, s2'
@@ -116,3 +120,5 @@ dochelp:
@echo
@echo ' make SPHINX_CONF={conf-file} [target] use *additional* sphinx-build'
@echo ' configuration. This is e.g. useful to build with nit-picking config.'
+ @echo
+ @echo ' Default location for the generated documents is Documentation/output'
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index e5d0bbd0230b..7394f034be65 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -527,7 +527,7 @@ grace period also drove it to completion.
This straightforward approach had the disadvantage of needing to
account for POSIX signals sent to user tasks,
so more recent implemementations use the Linux kernel's
-<a href="https://www.kernel.org/doc/Documentation/workqueue.txt">workqueues</a>.
+<a href="https://www.kernel.org/doc/Documentation/core-api/workqueue.rst">workqueues</a>.
<p>
The requesting task still does counter snapshotting and funnel-lock
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index b5343c5aa224..63066db39910 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -350,7 +350,7 @@ If something goes wrong
help debugging the problem. The text above the dump is also
important: it tells something about why the kernel dumped code (in
the above example, it's due to a bad kernel pointer). More information
- on making sense of the dump is in Documentation/admin-guide/oops-tracing.rst
+ on making sense of the dump is in Documentation/admin-guide/bug-hunting.rst
- If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
as is, otherwise you will have to use the ``ksymoops`` program to make
diff --git a/Documentation/admin-guide/bug-hunting.rst b/Documentation/admin-guide/bug-hunting.rst
index 08c4b1308189..f278b289e260 100644
--- a/Documentation/admin-guide/bug-hunting.rst
+++ b/Documentation/admin-guide/bug-hunting.rst
@@ -240,7 +240,7 @@ In order to report it upstream, you should identify the mailing list
used for the development of the affected code. This can be done by using
the ``get_maintainer.pl`` script.
-For example, if you find a bug at the gspca's conex.c file, you can get
+For example, if you find a bug at the gspca's sonixj.c file, you can get
their maintainers with::
$ ./scripts/get_maintainer.pl -f drivers/media/usb/gspca/sonixj.c
@@ -257,7 +257,7 @@ Please notice that it will point to:
Tejun and Bhaktipriya (in this specific case, none really envolved on the
development of this file);
- The driver maintainer (Hans Verkuil);
-- The subsystem maintainer (Mauro Carvalho Chehab)
+- The subsystem maintainer (Mauro Carvalho Chehab);
- The driver and/or subsystem mailing list (linux-media@vger.kernel.org);
- the Linux Kernel mailing list (linux-kernel@vger.kernel.org).
@@ -274,14 +274,14 @@ Fixing the bug
--------------
If you know programming, you could help us by not only reporting the bug,
-but also providing us with a solution. After all open source is about
+but also providing us with a solution. After all, open source is about
sharing what you do and don't you want to be recognised for your genius?
If you decide to take this way, once you have worked out a fix please submit
it upstream.
Please do read
-ref:`Documentation/process/submitting-patches.rst <submittingpatches>` though
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>` though
to help your code get accepted.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 05496622b4ef..97a76eea7389 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -314,7 +314,7 @@
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
- See also Documentation/input/joystick.txt
+ See also Documentation/input/joydev/joystick.rst
analog.map= [HW,JOY] Analog joystick and gamepad support
Specifies type or capabilities of an analog joystick
@@ -439,7 +439,7 @@
bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
bttv.radio= Most important insmod options are available as
kernel args too.
- bttv.pll= See Documentation/video4linux/bttv/Insmod-options
+ bttv.pll= See Documentation/media/v4l-drivers/bttv.rst
bttv.tuner=
bulk_remove=off [PPC] This parameter disables the use of the pSeries
@@ -641,8 +641,8 @@
For now, only VisioBraille is supported.
consoleblank= [KNL] The console blank (screen saver) timeout in
- seconds. Defaults to 10*60 = 10mins. A value of 0
- disables the blank timer.
+ seconds. A value of 0 disables the blank timer.
+ Defaults to 0.
coredump_filter=
[KNL] Change the default value for
@@ -724,7 +724,7 @@
db9.dev[2|3]= [HW,JOY] Multisystem joystick support via parallel port
(one device per port)
Format: <port#>,<type>
- See also Documentation/input/joystick-parport.txt
+ See also Documentation/input/devices/joystick-parport.rst
ddebug_query= [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot
time. See
@@ -1220,7 +1220,7 @@
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)
Format: <port#>,<pad1>,<pad2>,<pad3>,<pad4>,<pad5>
- See also Documentation/input/joystick-parport.txt
+ See also Documentation/input/devices/joystick-parport.rst
gamma= [HW,DRM]
@@ -1766,7 +1766,7 @@
ivrs_acpihid[00:14.5]=AMD0020:0
js= [HW,JOY] Analog joystick
- See Documentation/input/joystick.txt.
+ See Documentation/input/joydev/joystick.rst.
nokaslr [KNL]
When CONFIG_RANDOMIZE_BASE is set, this disables
@@ -2248,10 +2248,10 @@
s2idle - Suspend-To-Idle
shallow - Power-On Suspend or equivalent (if supported)
deep - Suspend-To-RAM or equivalent (if supported)
- See Documentation/power/states.txt.
+ See Documentation/admin-guide/pm/sleep-states.rst.
meye.*= [HW] Set MotionEye Camera parameters
- See Documentation/video4linux/meye.txt.
+ See Documentation/media/v4l-drivers/meye.rst.
mfgpt_irq= [IA-32] Specify the IRQ to use for the
Multi-Function General Purpose Timers on AMD Geode
@@ -3134,7 +3134,7 @@
plip= [PPT,NET] Parallel port network link
Format: { parport<nr> | timid | 0 }
- See also Documentation/parport.txt.
+ See also Documentation/admin-guide/parport.rst.
pmtmr= [X86] Manual setup of pmtmr I/O Port.
Override pmtimer IOPort with a hex value.
@@ -3885,6 +3885,12 @@
[KNL] Should the soft-lockup detector generate panics.
Format: <integer>
+ A nonzero value instructs the soft-lockup detector
+ to panic the machine when a soft-lockup occurs. This
+ is also controlled by CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC
+ which is the respective build-time switch to that
+ functionality.
+
softlockup_all_cpu_backtrace=
[KNL] Should the soft-lockup detector generate
backtraces on all cpus.
@@ -4199,7 +4205,7 @@
TurboGraFX parallel port interface
Format:
<port#>,<js1>,<js2>,<js3>,<js4>,<js5>,<js6>,<js7>
- See also Documentation/input/joystick-parport.txt
+ See also Documentation/input/devices/joystick-parport.rst
udbg-immortal [PPC] When debugging early kernel crashes that
happen after console_init() and before a proper
diff --git a/Documentation/admin-guide/reporting-bugs.rst b/Documentation/admin-guide/reporting-bugs.rst
index 26b60b419652..4650edb8840a 100644
--- a/Documentation/admin-guide/reporting-bugs.rst
+++ b/Documentation/admin-guide/reporting-bugs.rst
@@ -94,7 +94,7 @@ step-by-step instructions for how a user can trigger the bug.
If the failure includes an "OOPS:", take a picture of the screen, capture
a netconsole trace, or type the message from your screen into the bug
-report. Please read "Documentation/admin-guide/oops-tracing.rst" before posting your
+report. Please read "Documentation/admin-guide/bug-hunting.rst" before posting your
bug report. This explains what you should do with the "Oops" information
to make it useful to the recipient.
@@ -120,7 +120,7 @@ summary from [1.]>" for easy identification by the developers::
[4.2.] Kernel .config file:
[5.] Most recent kernel version which did not have the bug:
[6.] Output of Oops.. message (if applicable) with symbolic information
- resolved (see Documentation/admin-guide/oops-tracing.rst)
+ resolved (see Documentation/admin-guide/bug-hunting.rst)
[7.] A small shell script or example program which triggers the
problem (if possible)
[8.] Environment
diff --git a/Documentation/cdrom/ide-cd b/Documentation/cdrom/ide-cd
index f4dc9de2694e..a5f2a7f1ff46 100644
--- a/Documentation/cdrom/ide-cd
+++ b/Documentation/cdrom/ide-cd
@@ -55,13 +55,9 @@ This driver provides the following features:
(to compile support as a module which can be loaded and unloaded)
to the options:
- Enhanced IDE/MFM/RLL disk/cdrom/tape/floppy support
+ ATA/ATAPI/MFM/RLL support
Include IDE/ATAPI CDROM support
- and `no' to
-
- Use old disk-only driver on primary interface
-
Depending on what type of IDE interface you have, you may need to
specify additional configuration options. See
Documentation/ide/ide.txt.
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 5da10184d908..2d9da6c40a4d 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -2,11 +2,9 @@
The Linux Kernel API
====================
-Data Types
-==========
-Doubly Linked Lists
--------------------
+List Management Functions
+=========================
.. kernel-doc:: include/linux/list.h
:internal:
@@ -56,11 +54,26 @@ Bitmap Operations
-----------------
.. kernel-doc:: lib/bitmap.c
+ :doc: bitmap introduction
+
+.. kernel-doc:: include/linux/bitmap.h
+ :doc: declare bitmap
+
+.. kernel-doc:: include/linux/bitmap.h
+ :doc: bitmap overview
+
+.. kernel-doc:: include/linux/bitmap.h
+ :doc: bitmap bitops
+
+.. kernel-doc:: lib/bitmap.c
:export:
.. kernel-doc:: lib/bitmap.c
:internal:
+.. kernel-doc:: include/linux/bitmap.h
+ :internal:
+
Command-line Parsing
--------------------
@@ -70,13 +83,16 @@ Command-line Parsing
CRC Functions
-------------
+.. kernel-doc:: lib/crc4.c
+ :export:
+
.. kernel-doc:: lib/crc7.c
:export:
-.. kernel-doc:: lib/crc16.c
+.. kernel-doc:: lib/crc8.c
:export:
-.. kernel-doc:: lib/crc-itu-t.c
+.. kernel-doc:: lib/crc16.c
:export:
.. kernel-doc:: lib/crc32.c
@@ -84,6 +100,9 @@ CRC Functions
.. kernel-doc:: lib/crc-ccitt.c
:export:
+.. kernel-doc:: lib/crc-itu-t.c
+ :export:
+
idr/ida Functions
-----------------
@@ -96,6 +115,30 @@ idr/ida Functions
.. kernel-doc:: lib/idr.c
:export:
+Math Functions in Linux
+=======================
+
+Base 2 log and power Functions
+------------------------------
+
+.. kernel-doc:: include/linux/log2.h
+ :internal:
+
+Division Functions
+------------------
+
+.. kernel-doc:: include/asm-generic/div64.h
+ :functions: do_div
+
+.. kernel-doc:: include/linux/math64.h
+ :internal:
+
+.. kernel-doc:: lib/div64.c
+ :functions: div_s64_rem div64_u64_rem div64_u64 div64_s64
+
+.. kernel-doc:: lib/gcd.c
+ :export:
+
Memory Management in Linux
==========================
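
One point worth illustrating for the math helpers documented above is that
do_div() divides its 64-bit argument in place: the variable becomes the
quotient and the macro evaluates to the 32-bit remainder. The sketch below
shows the typical call pattern as a hypothetical kernel module (module name
and values chosen purely for illustration); it is not code taken from this
patch.

    #include <linux/module.h>
    #include <linux/math64.h>
    #include <linux/gcd.h>

    static int __init mathdoc_example_init(void)
    {
            u64 ns = 1000000123ULL;
            u32 rem;
            u64 q;

            /*
             * do_div() divides in place: 'ns' becomes the quotient and
             * the 32-bit remainder is returned by the macro.
             */
            rem = do_div(ns, 1000000);
            pr_info("quotient=%llu remainder=%u\n", ns, rem);

            /* div64_u64() takes a 64-bit divisor and returns the quotient. */
            q = div64_u64(123456789ULL, 1000ULL);
            pr_info("div64_u64=%llu\n", q);

            /* gcd() from lib/gcd.c operates on unsigned long values. */
            pr_info("gcd(48, 180)=%lu\n", gcd(48, 180));

            return 0;
    }

    static void __exit mathdoc_example_exit(void)
    {
    }

    module_init(mathdoc_example_init);
    module_exit(mathdoc_example_exit);
    MODULE_LICENSE("GPL");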
diff --git a/Documentation/dev-tools/coccinelle.rst b/Documentation/dev-tools/coccinelle.rst
index 4a64b4c69d3f..37e474ff6911 100644
--- a/Documentation/dev-tools/coccinelle.rst
+++ b/Documentation/dev-tools/coccinelle.rst
@@ -209,7 +209,7 @@ err.log will now have the profiling information, while stdout will
provide some progress information as Coccinelle moves forward with
work.
-DEBUG_FILE support is only supported when using coccinelle >= 1.2.
+DEBUG_FILE support is only supported when using coccinelle >= 1.0.2.
.cocciconfig support
--------------------
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index ebd03d11d2c2..e80850eefe13 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -31,6 +31,17 @@ To build and run the tests with a single command, use::
Note that some tests will require root privileges.
+Build and run from user specific object directory (make O=dir)::
+
+ $ make O=/tmp/kselftest kselftest
+
+Build and run KBUILD_OUTPUT directory (make KBUILD_OUTPUT=)::
+
+ $ make KBUILD_OUTPUT=/tmp/kselftest kselftest
+
+The above commands run the tests and print pass/fail summary to make it
+easier to understand the test results. Please find the detailed individual
+test results for each test in /tmp/testname file(s).
Running a subset of selftests
=============================
@@ -46,10 +57,21 @@ You can specify multiple tests to build and run::
$ make TARGETS="size timers" kselftest
+Build and run from user specific object directory (make O=dir)::
+
+ $ make O=/tmp/kselftest TARGETS="size timers" kselftest
+
+Build and run KBUILD_OUTPUT directory (make KBUILD_OUTPUT=)::
+
+ $ make KBUILD_OUTPUT=/tmp/kselftest TARGETS="size timers" kselftest
+
+The above commands run the tests and print pass/fail summary to make it
+easier to understand the test results. Please find the detailed individual
+test results for each test in /tmp/testname file(s).
+
See the top-level tools/testing/selftests/Makefile for the list of all
possible targets.
-
Running the full range hotplug selftests
========================================
@@ -113,9 +135,17 @@ Contributing new tests (details)
* Use TEST_GEN_XXX if such binaries or files are generated during
compiling.
- TEST_PROGS, TEST_GEN_PROGS mean it is the excutable tested by
+ TEST_PROGS, TEST_GEN_PROGS mean it is the executable tested by
default.
+ TEST_CUSTOM_PROGS should be used by tests that require custom build
+ rule and prevent common build rule use.
+
+ TEST_PROGS are for test shell scripts. Please ensure shell script has
+ its exec bit set. Otherwise, lib.mk run_tests will generate a warning.
+
+ TEST_CUSTOM_PROGS and TEST_PROGS will be run by common run_tests.
+
TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
executable which is not tested by default.
TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
diff --git a/Documentation/devicetree/bindings/gpio/gpio-fan.txt b/Documentation/devicetree/bindings/hwmon/gpio-fan.txt
index 439a7430fc68..439a7430fc68 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-fan.txt
+++ b/Documentation/devicetree/bindings/hwmon/gpio-fan.txt
diff --git a/Documentation/devicetree/bindings/hwmon/max1619.txt b/Documentation/devicetree/bindings/hwmon/max1619.txt
new file mode 100644
index 000000000000..c70dbbe1e56f
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/max1619.txt
@@ -0,0 +1,12 @@
+Bindings for MAX1619 Temperature Sensor
+
+Required properties:
+- compatible : "maxim,max1619"
+- reg : I2C address, one of 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, or
+ 0x4d, 0x4e
+
+Example:
+ temp@4c {
+ compatible = "maxim,max1619";
+ reg = <0x4c>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/max31785.txt b/Documentation/devicetree/bindings/hwmon/max31785.txt
new file mode 100644
index 000000000000..106e08c56aaa
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/max31785.txt
@@ -0,0 +1,22 @@
+Bindings for the Maxim MAX31785 Intelligent Fan Controller
+==========================================================
+
+Reference:
+
+https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf
+
+The Maxim MAX31785 is a PMBus device providing closed-loop, multi-channel fan
+management with temperature and remote voltage sensing. Various fan control
+features are provided, including PWM frequency control, temperature hysteresis,
+dual tachometer measurements, and fan health monitoring.
+
+Required properties:
+- compatible : One of "maxim,max31785" or "maxim,max31785a"
+- reg : I2C address, one of 0x52, 0x53, 0x54, 0x55.
+
+Example:
+
+ fans@52 {
+ compatible = "maxim,max31785";
+ reg = <0x52>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt b/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt
new file mode 100644
index 000000000000..8765c605e6bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt
@@ -0,0 +1,54 @@
+* Amlogic Meson6, Meson8 and Meson8b SDIO/MMC controller
+
+The highspeed MMC host controller on Amlogic SoCs provides an interface
+for MMC, SD, SDIO and SDHC types of memory cards.
+
+Supported maximum speeds are the ones of the eMMC standard 4.41 as well
+as the speed of SD standard 2.0.
+
+The hardware provides an internal "mux" which allows up to three slots
+to be controlled. Only one slot can be accessed at a time.
+
+Required properties:
+ - compatible : must be one of
+ - "amlogic,meson8-sdio"
+ - "amlogic,meson8b-sdio"
+ along with the generic "amlogic,meson-mx-sdio"
+ - reg : mmc controller base registers
+ - interrupts : mmc controller interrupt
+ - #address-cells : must be 1
+ - size-cells : must be 0
+ - clocks : phandle to clock providers
+ - clock-names : must contain "core" and "clkin"
+
+Required child nodes:
+A node for each slot provided by the MMC controller is required.
+NOTE: due to a driver limitation currently only one slot (= child node)
+ is supported!
+
+Required properties on each child node (= slot):
+ - compatible : must be "mmc-slot" (see mmc.txt within this directory)
+ - reg : the slot (or "port") ID
+
+Optional properties on each child node (= slot):
+ - bus-width : must be 1 or 4 (8-bit bus is not supported)
+ - for cd and all other additional generic mmc parameters
+ please refer to mmc.txt within this directory
+
+Examples:
+ mmc@c1108c20 {
+ compatible = "amlogic,meson8-sdio", "amlogic,meson-mx-sdio";
+ reg = <0xc1108c20 0x20>;
+ interrupts = <0 28 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc CLKID_SDIO>, <&clkc CLKID_CLK81>;
+ clock-names = "core", "clkin";
+
+ slot@1 {
+ compatible = "mmc-slot";
+ reg = <1>;
+
+ bus-width = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index b32ade645ad9..94a90b49a692 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -53,6 +53,9 @@ Optional properties:
- no-sdio: controller is limited to send sdio cmd during initialization
- no-sd: controller is limited to send sd cmd during initialization
- no-mmc: controller is limited to send mmc cmd during initialization
+- fixed-emmc-driver-type: for non-removable eMMC, enforce this driver type.
+ The value <n> is the driver type as specified in the eMMC specification
+ (table 206 in spec version 5.1).
*NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index 4182ea36ca5b..72d2a734ab85 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -7,10 +7,18 @@ This file documents differences between the core properties in mmc.txt
and the properties used by the msdc driver.
Required properties:
-- compatible: Should be "mediatek,mt8173-mmc","mediatek,mt8135-mmc"
+- compatible: value should be either of the following.
+ "mediatek,mt8135-mmc": for mmc host ip compatible with mt8135
+ "mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
+ "mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
+ "mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
+- reg: physical base address of the controller and length
- interrupts: Should contain MSDC interrupt number
-- clocks: MSDC source clock, HCLK
-- clock-names: "source", "hclk"
+- clocks: Should contain phandle for the clock feeding the MMC controller
+- clock-names: Should contain the following:
+ "source" - source clock (required)
+ "hclk" - HCLK which used for host (required)
+ "source_cg" - independent source clock gate (required for MT2712)
- pinctrl-names: should be "default", "state_uhs"
- pinctrl-0: should contain default/high speed pin ctrl
- pinctrl-1: should contain uhs mode pin ctrl
@@ -30,6 +38,10 @@ Optional properties:
- mediatek,hs400-cmd-resp-sel-rising: HS400 command response sample selection
If present,HS400 command responses are sampled on rising edges.
If not present,HS400 command responses are sampled on falling edges.
+- mediatek,latch-ck: Some SoCs do not support enhance_rx, need set correct latch-ck to avoid data crc
+ error caused by stop clock(fifo full)
+ Valid range = [0:0x7]. if not present, default value is 0.
+ applied to compatible "mediatek,mt2701-mmc".
Examples:
mmc0: mmc@11230000 {
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt b/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
index de2c53cff4f1..3ee9263adf73 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
@@ -15,6 +15,8 @@ Required properties:
Optional properties:
- vqmmc-supply: phandle to the regulator device tree node, mentioned
as the VCCQ/VDD_IO supply in the eMMC/SD specs.
+- fujitsu,cmd-dat-delay-select: boolean property indicating that this host
+ requires the CMD_DAT_DELAY control to be enabled.
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 0576264eab5e..bfdcdc4ccdff 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -18,6 +18,8 @@ Required properties:
"core" - SDC MMC clock (MCLK) (required)
"bus" - SDCC bus voter clock (optional)
"xo" - TCXO clock (optional)
+ "cal" - reference clock for RCLK delay calibration (optional)
+ "sleep" - sleep clock for RCLK delay calibration (optional)
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-omap.txt b/Documentation/devicetree/bindings/mmc/sdhci-omap.txt
new file mode 100644
index 000000000000..51775a372c06
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-omap.txt
@@ -0,0 +1,16 @@
+* TI OMAP SDHCI Controller
+
+Refer to mmc.txt for standard MMC bindings.
+
+Required properties:
+- compatible: Should be "ti,dra7-sdhci" for DRA7 and DRA72 controllers
+- ti,hwmods: Must be "mmc<n>", <n> is controller instance starting 1
+
+Example:
+ mmc1: mmc@4809c000 {
+ compatible = "ti,dra7-sdhci";
+ reg = <0x4809c000 0x400>;
+ ti,hwmods = "mmc1";
+ bus-width = <4>;
+ vmmc-supply = <&vmmc>; /* phandle to regulator node */
+ };
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 54ef642f23a0..3c6762430fd9 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -10,7 +10,7 @@ described in mmc.txt, can be used. Additionally the following tmio_mmc-specific
optional bindings can be used.
Required properties:
-- compatible: "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
+- compatible: should contain one or more of the following:
"renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
"renesas,sdhi-r7s72100" - SDHI IP on R7S72100 SoC
"renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
@@ -26,6 +26,16 @@ Required properties:
"renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
"renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+ "renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller
+ "renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller
+ "renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 or RZ/G1
+ SDHI controller
+ "renesas,rcar-gen3-sdhi" - a generic R-Car Gen3 SDHI controller
+
+
+ When compatible with the generic version, nodes must list
+ the SoC-specific version corresponding to the platform
+ first followed by the generic version.
- clocks: Most controllers only have 1 clock source per channel. However, on
some variations of this controller, the internal card detection
@@ -43,3 +53,61 @@ Optional properties:
- pinctrl-names: should be "default", "state_uhs"
- pinctrl-0: should contain default/high speed pin ctrl
- pinctrl-1: should contain uhs mode pin ctrl
+
+Example: R8A7790 (R-Car H2) SDHI controller nodes
+
+ sdhi0: sd@ee100000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee100000 0 0x328>;
+ interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 314>;
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <195000000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 314>;
+ status = "disabled";
+ };
+
+ sdhi1: sd@ee120000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee120000 0 0x328>;
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 313>;
+ dmas = <&dmac0 0xc9>, <&dmac0 0xca>,
+ <&dmac1 0xc9>, <&dmac1 0xca>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <195000000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 313>;
+ status = "disabled";
+ };
+
+ sdhi2: sd@ee140000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee140000 0 0x100>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 312>;
+ dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
+ <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <97500000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 312>;
+ status = "disabled";
+ };
+
+ sdhi3: sd@ee160000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee160000 0 0x100>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 311>;
+ dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
+ <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <97500000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 311>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index 0f2a6f8fcafd..27717e816e71 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -1,8 +1,9 @@
-* Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 Voltage Regulator
+* Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
+ Voltage Regulator
Required properties:
-- compatible: "dlg,da9211" or "dlg,da9212" or "dlg,da9213"
- or "dlg,da9214" or "dlg,da9215"
+- compatible: "dlg,da9211" or "dlg,da9212" or "dlg,da9213" or "dlg,da9223"
+ or "dlg,da9214" or "dlg,da9224" or "dlg,da9215" or "dlg,da9225"
- reg: I2C slave address, usually 0x68.
- interrupts: the interrupt outputs of the controller
- regulators: A node that houses a sub-node for each regulator within the
@@ -16,7 +17,6 @@ Optional properties:
- Any optional property defined in regulator.txt
Example 1) DA9211
-
pmic: da9211@68 {
compatible = "dlg,da9211";
reg = <0x68>;
@@ -35,7 +35,6 @@ Example 1) DA9211
};
Example 2) DA9212
-
pmic: da9212@68 {
compatible = "dlg,da9212";
reg = <0x68>;
@@ -79,7 +78,25 @@ Example 3) DA9213
};
};
-Example 4) DA9214
+Example 4) DA9223
+ pmic: da9223@68 {
+ compatible = "dlg,da9223";
+ reg = <0x68>;
+ interrupts = <3 27>;
+
+ regulators {
+ BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 27 0>;
+ };
+ };
+ };
+
+Example 5) DA9214
pmic: da9214@68 {
compatible = "dlg,da9214";
reg = <0x68>;
@@ -105,7 +122,33 @@ Example 4) DA9214
};
};
-Example 5) DA9215
+Example 6) DA9224
+ pmic: da9224@68 {
+ compatible = "dlg,da9224";
+ reg = <0x68>;
+ interrupts = <3 27>;
+
+ regulators {
+ BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 27 0>;
+ };
+ BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 17 0>;
+ };
+ };
+ };
+
+Example 7) DA9215
pmic: da9215@68 {
compatible = "dlg,da9215";
reg = <0x68>;
@@ -131,3 +174,28 @@ Example 5) DA9215
};
};
+Example 8) DA9225
+ pmic: da9225@68 {
+ compatible = "dlg,da9225";
+ reg = <0x68>;
+ interrupts = <3 27>;
+
+ regulators {
+ BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <4000000>;
+ regulator-max-microamp = <7000000>;
+ enable-gpios = <&gpio 27 0>;
+ };
+ BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <4000000>;
+ regulator-max-microamp = <7000000>;
+ enable-gpios = <&gpio 17 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index 444c47831a40..c6dd3f5e485b 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -21,7 +21,7 @@ Each regulator is defined using the standard binding for regulators.
Example 1: PFUZE100
- pmic: pfuze100@08 {
+ pmic: pfuze100@8 {
compatible = "fsl,pfuze100";
reg = <0x08>;
@@ -122,7 +122,7 @@ Example 1: PFUZE100
Example 2: PFUZE200
- pmic: pfuze200@08 {
+ pmic: pfuze200@8 {
compatible = "fsl,pfuze200";
reg = <0x08>;
@@ -216,7 +216,7 @@ Example 2: PFUZE200
Example 3: PFUZE3000
- pmic: pfuze3000@08 {
+ pmic: pfuze3000@8 {
compatible = "fsl,pfuze3000";
reg = <0x08>;
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index 0fa3b0fac129..57d2c65899df 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -8,6 +8,7 @@ Qualcomm SPMI Regulators
"qcom,pm8916-regulators"
"qcom,pm8941-regulators"
"qcom,pm8994-regulators"
+ "qcom,pmi8994-regulators"
- interrupts:
Usage: optional
@@ -100,6 +101,15 @@ Qualcomm SPMI Regulators
Definition: Reference to regulator supplying the input pin, as
described in the data sheet.
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_l1-supply:
+ Usage: optional (pmi8994 only)
+ Value type: <phandle>
+ Definition: Reference to regulator supplying the input pin, as
+ described in the data sheet.
+
The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
@@ -122,6 +132,9 @@ pm8994:
l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20,
l21, l22, l23, l24, l25, l26, l27, l28, l29, l30, l31, l32, lvs1, lvs2
+pmi8994:
+ s1, s2, s3, l1
+
The content of each sub-node is defined by the standard binding for regulators -
see regulator.txt - with additional custom properties described below:
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index e865855726a2..bdd83959019c 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,7 +1,9 @@
Renesas MSIOF spi controller
Required properties:
-- compatible : "renesas,msiof-r8a7790" (R-Car H2)
+- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
+ "renesas,msiof-r8a7745" (RZ/G1E)
+ "renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
@@ -10,7 +12,7 @@ Required properties:
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
"renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
- "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+ "renesas,rcar-gen2-msiof" (generic R-Car Gen2 and RZ/G1 compatible device)
"renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
"renesas,sh-msiof" (deprecated)
diff --git a/Documentation/devicetree/bindings/spi/spi-davinci.txt b/Documentation/devicetree/bindings/spi/spi-davinci.txt
index f5916c92fe91..1925277bfc1e 100644
--- a/Documentation/devicetree/bindings/spi/spi-davinci.txt
+++ b/Documentation/devicetree/bindings/spi/spi-davinci.txt
@@ -24,6 +24,16 @@ Required properties:
based on a specific SoC configuration.
- interrupts: interrupt number mapped to CPU.
- clocks: spi clk phandle
+ For 66AK2G this property should be set per binding,
+ Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+
+SoC-specific Required Properties:
+
+The following are mandatory properties for Keystone 2 66AK2G SoCs only:
+
+- power-domains: Should contain a phandle to a PM domain provider node
+ and an args specifier containing the SPI device id
+ value. This property is as per the binding,
Optional:
- cs-gpios: gpio chip selects
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
index 8f4169f63936..3b02b3a7cfb2 100644
--- a/Documentation/devicetree/bindings/spi/spi-rspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -5,11 +5,14 @@ Required properties:
"renesas,rspi-<soctype>", "renesas,rspi" as fallback.
For Renesas Serial Peripheral Interface on RZ/A1H:
"renesas,rspi-<soctype>", "renesas,rspi-rz" as fallback.
- For Quad Serial Peripheral Interface on R-Car Gen2:
+ For Quad Serial Peripheral Interface on R-Car Gen2 and
+ RZ/G1 devices:
"renesas,qspi-<soctype>", "renesas,qspi" as fallback.
Examples with soctypes are:
- "renesas,rspi-sh7757" (SH)
- "renesas,rspi-r7s72100" (RZ/A1H)
+ - "renesas,qspi-r8a7743" (RZ/G1M)
+ - "renesas,qspi-r8a7745" (RZ/G1E)
- "renesas,qspi-r8a7790" (R-Car H2)
- "renesas,qspi-r8a7791" (R-Car M2-W)
- "renesas,qspi-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
new file mode 100644
index 000000000000..8de589b376ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
@@ -0,0 +1,58 @@
+Spreadtrum ADI controller
+
+ADI is the abbreviation of Analog-Digital Interface, which is used to access an
+analog chip (such as a PMIC) from a digital chip. The ADI controller follows the
+SPI framework because its hardware implementation is similar to an SPI bus and
+its timing is compatible with SPI timing.
+
+The ADI controller has 50 channels, including 2 software read/write channels and
+48 hardware channels, to access the analog chip. For the 2 software read/write
+channels, users set ADI registers to access the analog chip. The hardware
+channels can be configured so that other hardware components use them
+independently: one analog chip address is linked to one hardware channel, and
+the mapped analog chip address is then accessed through that hardware channel,
+triggered by the hardware components instead of the ADI software channels.
+
+Thus we introduce one property named "sprd,hw-channels" to configure the hardware
+channels: the first value specifies the hardware channel id, which is used to
+transfer data triggered by hardware automatically, and the second value specifies
+the analog chip address that the user wants to access through hardware components.
+
+Since multiple subsystems use the same ADI to access the analog chip, any
+read/write through the ADI software channels must be done under one hardware
+spinlock, to prevent other subsystems from reading/writing data through the ADI
+software channels at the same time; otherwise two parallel routines setting the
+ADI registers would corrupt the ADI controller registers and lead to incorrect
+results. Therefore one hardware spinlock is needed to synchronize the multiple
+subsystems.
+
+Required properties:
+- compatible: Should be "sprd,sc9860-adi".
+- reg: Offset and length of ADI-SPI controller register space.
+- hwlocks: Reference to a phandle of a hwlock provider node.
+- hwlock-names: Reference to hwlock name strings defined in the same order
+ as the hwlocks, should be "adi".
+- #address-cells: Number of cells required to define a chip select address
+ on the ADI-SPI bus. Should be set to 1.
+- #size-cells: Size of cells required to define a chip select address size
+ on the ADI-SPI bus. Should be set to 0.
+
+Optional properties:
+- sprd,hw-channels: This is an array of channel values up to 49 channels.
+ The first value specifies the hardware channel id which is used to
+ transfer data triggered by hardware automatically, and the second
+ value specifies the analog chip address that the user wants to access
+ through hardware components.
+
+SPI slave nodes must be children of the SPI controller node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
+
+Example:
+ adi_bus: spi@40030000 {
+ compatible = "sprd,sc9860-adi";
+ reg = <0 0x40030000 0 0x10000>;
+ hwlocks = <&hwlock1 0>;
+ hwlock-names = "adi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sprd,hw-channels = <30 0x8c20>;
+ };
diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index af284fbd4d23..8bcac6ee73da 100644
--- a/Documentation/devicetree/bindings/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
@@ -71,6 +71,7 @@ isil,isl29028 Intersil ISL29028 Ambient Light and Proximity Sensor
isil,isl29030 Intersil ISL29030 Ambient Light and Proximity Sensor
maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
+maxim,max6621 PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion
maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
mc,rv3029c2 Real Time Clock Module with I2C-Bus
mcube,mc3230 mCube 3-axis 8-bit digital accelerometer
diff --git a/Documentation/dmaengine/00-INDEX b/Documentation/dmaengine/00-INDEX
deleted file mode 100644
index 07de6573d22b..000000000000
--- a/Documentation/dmaengine/00-INDEX
+++ /dev/null
@@ -1,8 +0,0 @@
-00-INDEX
- - this file.
-client.txt
- -the DMA Engine API Guide.
-dmatest.txt
- - how to compile, configure and use the dmatest system.
-provider.txt
- - the DMA controller API. \ No newline at end of file
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
deleted file mode 100644
index c72b4563de10..000000000000
--- a/Documentation/dmaengine/client.txt
+++ /dev/null
@@ -1,222 +0,0 @@
- DMA Engine API Guide
- ====================
-
- Vinod Koul <vinod dot koul at intel.com>
-
-NOTE: For DMA Engine usage in async_tx please see:
- Documentation/crypto/async-tx-api.txt
-
-
-Below is a guide to device driver writers on how to use the Slave-DMA API of the
-DMA Engine. This is applicable only for slave DMA usage only.
-
-The slave DMA usage consists of following steps:
-1. Allocate a DMA slave channel
-2. Set slave and controller specific parameters
-3. Get a descriptor for transaction
-4. Submit the transaction
-5. Issue pending requests and wait for callback notification
-
-1. Allocate a DMA slave channel
-
- Channel allocation is slightly different in the slave DMA context,
- client drivers typically need a channel from a particular DMA
- controller only and even in some cases a specific channel is desired.
- To request a channel dma_request_chan() API is used.
-
- Interface:
- struct dma_chan *dma_request_chan(struct device *dev, const char *name);
-
- Which will find and return the 'name' DMA channel associated with the 'dev'
- device. The association is done via DT, ACPI or board file based
- dma_slave_map matching table.
-
- A channel allocated via this interface is exclusive to the caller,
- until dma_release_channel() is called.
-
-2. Set slave and controller specific parameters
-
- Next step is always to pass some specific information to the DMA
- driver. Most of the generic information which a slave DMA can use
- is in struct dma_slave_config. This allows the clients to specify
- DMA direction, DMA addresses, bus widths, DMA burst lengths etc
- for the peripheral.
-
- If some DMA controllers have more parameters to be sent then they
- should try to embed struct dma_slave_config in their controller
- specific structure. That gives flexibility to client to pass more
- parameters, if required.
-
- Interface:
- int dmaengine_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-
- Please see the dma_slave_config structure definition in dmaengine.h
- for a detailed explanation of the struct members. Please note
- that the 'direction' member will be going away as it duplicates the
- direction given in the prepare call.
-
-3. Get a descriptor for transaction
-
- For slave usage the various modes of slave transfers supported by the
- DMA-engine are:
-
- slave_sg - DMA a list of scatter gather buffers from/to a peripheral
- dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
- operation is explicitly stopped.
- interleaved_dma - This is common to Slave as well as M2M clients. For slave
- address of devices' fifo could be already known to the driver.
- Various types of operations could be expressed by setting
- appropriate values to the 'dma_interleaved_template' members.
-
- A non-NULL return of this transfer API represents a "descriptor" for
- the given transaction.
-
- Interface:
- struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags);
-
- struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction);
-
- struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
- struct dma_chan *chan, struct dma_interleaved_template *xt,
- unsigned long flags);
-
- The peripheral driver is expected to have mapped the scatterlist for
- the DMA operation prior to calling dmaengine_prep_slave_sg(), and must
- keep the scatterlist mapped until the DMA operation has completed.
- The scatterlist must be mapped using the DMA struct device.
- If a mapping needs to be synchronized later, dma_sync_*_for_*() must be
- called using the DMA struct device, too.
- So, normal setup should look like this:
-
- nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len);
- if (nr_sg == 0)
- /* error */
-
- desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
-
- Once a descriptor has been obtained, the callback information can be
- added and the descriptor must then be submitted. Some DMA engine
- drivers may hold a spinlock between a successful preparation and
- submission so it is important that these two operations are closely
- paired.
-
- Note:
- Although the async_tx API specifies that completion callback
- routines cannot submit any new operations, this is not the
- case for slave/cyclic DMA.
-
- For slave DMA, the subsequent transaction may not be available
- for submission prior to callback function being invoked, so
- slave DMA callbacks are permitted to prepare and submit a new
- transaction.
-
- For cyclic DMA, a callback function may wish to terminate the
- DMA via dmaengine_terminate_async().
-
- Therefore, it is important that DMA engine drivers drop any
- locks before calling the callback function which may cause a
- deadlock.
-
- Note that callbacks will always be invoked from the DMA
- engines tasklet, never from interrupt context.
-
-4. Submit the transaction
-
- Once the descriptor has been prepared and the callback information
- added, it must be placed on the DMA engine drivers pending queue.
-
- Interface:
- dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
-
- This returns a cookie can be used to check the progress of DMA engine
- activity via other DMA engine calls not covered in this document.
-
- dmaengine_submit() will not start the DMA operation, it merely adds
- it to the pending queue. For this, see step 5, dma_async_issue_pending.
-
-5. Issue pending DMA requests and wait for callback notification
-
- The transactions in the pending queue can be activated by calling the
- issue_pending API. If channel is idle then the first transaction in
- queue is started and subsequent ones queued up.
-
- On completion of each DMA operation, the next in queue is started and
- a tasklet triggered. The tasklet will then call the client driver
- completion callback routine for notification, if set.
-
- Interface:
- void dma_async_issue_pending(struct dma_chan *chan);
-
-Further APIs:
-
-1. int dmaengine_terminate_sync(struct dma_chan *chan)
- int dmaengine_terminate_async(struct dma_chan *chan)
- int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */
-
- This causes all activity for the DMA channel to be stopped, and may
- discard data in the DMA FIFO which hasn't been fully transferred.
- No callback functions will be called for any incomplete transfers.
-
- Two variants of this function are available.
-
- dmaengine_terminate_async() might not wait until the DMA has been fully
- stopped or until any running complete callbacks have finished. But it is
- possible to call dmaengine_terminate_async() from atomic context or from
- within a complete callback. dmaengine_synchronize() must be called before it
- is safe to free the memory accessed by the DMA transfer or free resources
- accessed from within the complete callback.
-
- dmaengine_terminate_sync() will wait for the transfer and any running
- complete callbacks to finish before it returns. But the function must not be
- called from atomic context or from within a complete callback.
-
- dmaengine_terminate_all() is deprecated and should not be used in new code.
-
-2. int dmaengine_pause(struct dma_chan *chan)
-
- This pauses activity on the DMA channel without data loss.
-
-3. int dmaengine_resume(struct dma_chan *chan)
-
- Resume a previously paused DMA channel. It is invalid to resume a
- channel which is not currently paused.
-
-4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
-
- This can be used to check the status of the channel. Please see
- the documentation in include/linux/dmaengine.h for a more complete
- description of this API.
-
- This can be used in conjunction with dma_async_is_complete() and
- the cookie returned from dmaengine_submit() to check for
- completion of a specific DMA transaction.
-
- Note:
- Not all DMA engine drivers can return reliable information for
- a running DMA channel. It is recommended that DMA engine users
- pause or stop (via dmaengine_terminate_all()) the channel before
- using this API.
-
-5. void dmaengine_synchronize(struct dma_chan *chan)
-
- Synchronize the termination of the DMA channel to the current context.
-
- This function should be used after dmaengine_terminate_async() to synchronize
- the termination of the DMA channel to the current context. The function will
- wait for the transfer and any running complete callbacks to finish before it
- returns.
-
- If dmaengine_terminate_async() is used to stop the DMA channel this function
- must be called before it is safe to free memory accessed by previously
- submitted descriptors or to free any resources accessed within the complete
- callback of previously submitted descriptors.
-
- The behavior of this function is undefined if dma_async_issue_pending() has
- been called between dmaengine_terminate_async() and this function.
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
deleted file mode 100644
index 5dbe054a40ad..000000000000
--- a/Documentation/dmaengine/provider.txt
+++ /dev/null
@@ -1,424 +0,0 @@
-DMAengine controller documentation
-==================================
-
-Hardware Introduction
-+++++++++++++++++++++
-
-Most of the Slave DMA controllers have the same general principles of
-operations.
-
-They have a given number of channels to use for the DMA transfers, and
-a given number of requests lines.
-
-Requests and channels are pretty much orthogonal. Channels can be used
-to serve several to any requests. To simplify, channels are the
-entities that will be doing the copy, and requests what endpoints are
-involved.
-
-The request lines actually correspond to physical lines going from the
-DMA-eligible devices to the controller itself. Whenever the device
-will want to start a transfer, it will assert a DMA request (DRQ) by
-asserting that request line.
-
-A very simple DMA controller would only take into account a single
-parameter: the transfer size. At each clock cycle, it would transfer a
-byte of data from one buffer to another, until the transfer size has
-been reached.
-
-That wouldn't work well in the real world, since slave devices might
-require a specific number of bits to be transferred in a single
-cycle. For example, we may want to transfer as much data as the
-physical bus allows to maximize performances when doing a simple
-memory copy operation, but our audio device could have a narrower FIFO
-that requires data to be written exactly 16 or 24 bits at a time. This
-is why most if not all of the DMA controllers can adjust this, using a
-parameter called the transfer width.
-
-Moreover, some DMA controllers, whenever the RAM is used as a source
-or destination, can group the reads or writes in memory into a buffer,
-so instead of having a lot of small memory accesses, which is not
-really efficient, you'll get several bigger transfers. This is done
-using a parameter called the burst size, that defines how many single
-reads/writes it's allowed to do without the controller splitting the
-transfer into smaller sub-transfers.
-
-Our theoretical DMA controller would then only be able to do transfers
-that involve a single contiguous block of data. However, some of the
-transfers we usually have are not, and want to copy data from
-non-contiguous buffers to a contiguous buffer, which is called
-scatter-gather.
-
-DMAEngine, at least for mem2dev transfers, require support for
-scatter-gather. So we're left with two cases here: either we have a
-quite simple DMA controller that doesn't support it, and we'll have to
-implement it in software, or we have a more advanced DMA controller,
-that implements in hardware scatter-gather.
-
-The latter are usually programmed using a collection of chunks to
-transfer, and whenever the transfer is started, the controller will go
-over that collection, doing whatever we programmed there.
-
-This collection is usually either a table or a linked list. You will
-then push either the address of the table and its number of elements,
-or the first item of the list to one channel of the DMA controller,
-and whenever a DRQ will be asserted, it will go through the collection
-to know where to fetch the data from.
-
-Either way, the format of this collection is completely dependent on
-your hardware. Each DMA controller will require a different structure,
-but all of them will require, for every chunk, at least the source and
-destination addresses, whether it should increment these addresses or
-not and the three parameters we saw earlier: the burst size, the
-transfer width and the transfer size.
-
-The one last thing is that usually, slave devices won't issue DRQ by
-default, and you have to enable this in your slave device driver first
-whenever you're willing to use DMA.
-
-These were just the general memory-to-memory (also called mem2mem) or
-memory-to-device (mem2dev) kind of transfers. Most devices often
-support other kind of transfers or memory operations that dmaengine
-support and will be detailed later in this document.
-
-DMA Support in Linux
-++++++++++++++++++++
-
-Historically, DMA controller drivers have been implemented using the
-async TX API, to offload operations such as memory copy, XOR,
-cryptography, etc., basically any memory to memory operation.
-
-Over time, the need for memory to device transfers arose, and
-dmaengine was extended. Nowadays, the async TX API is written as a
-layer on top of dmaengine, and acts as a client. Still, dmaengine
-accommodates that API in some cases, and made some design choices to
-ensure that it stayed compatible.
-
-For more information on the Async TX API, please look the relevant
-documentation file in Documentation/crypto/async-tx-api.txt.
-
-DMAEngine Registration
-++++++++++++++++++++++
-
-struct dma_device Initialization
---------------------------------
-
-Just like any other kernel framework, the whole DMAEngine registration
-relies on the driver filling a structure and registering against the
-framework. In our case, that structure is dma_device.
-
-The first thing you need to do in your driver is to allocate this
-structure. Any of the usual memory allocators will do, but you'll also
-need to initialize a few fields in there:
-
- * channels: should be initialized as a list using the
- INIT_LIST_HEAD macro for example
-
- * src_addr_widths:
- - should contain a bitmask of the supported source transfer width
-
- * dst_addr_widths:
- - should contain a bitmask of the supported destination transfer
- width
-
- * directions:
- - should contain a bitmask of the supported slave directions
- (i.e. excluding mem2mem transfers)
-
- * residue_granularity:
- - Granularity of the transfer residue reported to dma_set_residue.
- - This can be either:
- + Descriptor
- -> Your device doesn't support any kind of residue
- reporting. The framework will only know that a particular
- transaction descriptor is done.
- + Segment
- -> Your device is able to report which chunks have been
- transferred
- + Burst
- -> Your device is able to report which burst have been
- transferred
-
- * dev: should hold the pointer to the struct device associated
- to your current driver instance.
-
-Supported transaction types
----------------------------
-
-The next thing you need is to set which transaction types your device
-(and driver) supports.
-
-Our dma_device structure has a field called cap_mask that holds the
-various types of transaction supported, and you need to modify this
-mask using the dma_cap_set function, with various flags depending on
-transaction types you support as an argument.
-
-All those capabilities are defined in the dma_transaction_type enum,
-in include/linux/dmaengine.h
-
-Currently, the types available are:
- * DMA_MEMCPY
- - The device is able to do memory to memory copies
-
- * DMA_XOR
- - The device is able to perform XOR operations on memory areas
- - Used to accelerate XOR intensive tasks, such as RAID5
-
- * DMA_XOR_VAL
- - The device is able to perform parity check using the XOR
- algorithm against a memory buffer.
-
- * DMA_PQ
- - The device is able to perform RAID6 P+Q computations, P being a
- simple XOR, and Q being a Reed-Solomon algorithm.
-
- * DMA_PQ_VAL
- - The device is able to perform parity check using RAID6 P+Q
- algorithm against a memory buffer.
-
- * DMA_INTERRUPT
- - The device is able to trigger a dummy transfer that will
- generate periodic interrupts
- - Used by the client drivers to register a callback that will be
- called on a regular basis through the DMA controller interrupt
-
- * DMA_PRIVATE
- - The devices only supports slave transfers, and as such isn't
- available for async transfers.
-
- * DMA_ASYNC_TX
- - Must not be set by the device, and will be set by the framework
- if needed
- - /* TODO: What is it about? */
-
- * DMA_SLAVE
- - The device can handle device to memory transfers, including
- scatter-gather transfers.
- - While in the mem2mem case we were having two distinct types to
- deal with a single chunk to copy or a collection of them, here,
- we just have a single transaction type that is supposed to
- handle both.
- - If you want to transfer a single contiguous memory buffer,
- simply build a scatter list with only one item.
-
- * DMA_CYCLIC
- - The device can handle cyclic transfers.
- - A cyclic transfer is a transfer where the chunk collection will
- loop over itself, with the last item pointing to the first.
- - It's usually used for audio transfers, where you want to operate
- on a single ring buffer that you will fill with your audio data.
-
- * DMA_INTERLEAVE
- - The device supports interleaved transfer.
- - These transfers can transfer data from a non-contiguous buffer
- to a non-contiguous buffer, opposed to DMA_SLAVE that can
- transfer data from a non-contiguous data set to a continuous
- destination buffer.
- - It's usually used for 2d content transfers, in which case you
- want to transfer a portion of uncompressed data directly to the
- display to print it
-
-These various types will also affect how the source and destination
-addresses change over time.
-
-Addresses pointing to RAM are typically incremented (or decremented)
-after each transfer. In case of a ring buffer, they may loop
-(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
-are typically fixed.
-
-Device operations
------------------
-
-Our dma_device structure also requires a few function pointers in
-order to implement the actual logic, now that we described what
-operations we were able to perform.
-
-The functions that we have to fill in there, and hence have to
-implement, obviously depend on the transaction types you reported as
-supported.
-
- * device_alloc_chan_resources
- * device_free_chan_resources
- - These functions will be called whenever a driver will call
- dma_request_channel or dma_release_channel for the first/last
- time on the channel associated to that driver.
- - They are in charge of allocating/freeing all the needed
- resources in order for that channel to be useful for your
- driver.
- - These functions can sleep.
-
- * device_prep_dma_*
- - These functions are matching the capabilities you registered
- previously.
- - These functions all take the buffer or the scatterlist relevant
- for the transfer being prepared, and should create a hardware
- descriptor or a list of hardware descriptors from it
- - These functions can be called from an interrupt context
- - Any allocation you might do should be using the GFP_NOWAIT
- flag, in order not to potentially sleep, but without depleting
- the emergency pool either.
- - Drivers should try to pre-allocate any memory they might need
- during the transfer setup at probe time to avoid putting to
- much pressure on the nowait allocator.
-
- - It should return a unique instance of the
- dma_async_tx_descriptor structure, that further represents this
- particular transfer.
-
- - This structure can be initialized using the function
- dma_async_tx_descriptor_init.
- - You'll also need to set two fields in this structure:
- + flags:
- TODO: Can it be modified by the driver itself, or
- should it be always the flags passed in the arguments
-
- + tx_submit: A pointer to a function you have to implement,
- that is supposed to push the current
- transaction descriptor to a pending queue, waiting
- for issue_pending to be called.
- - In this structure the function pointer callback_result can be
- initialized in order for the submitter to be notified that a
- transaction has completed. In the earlier code the function pointer
- callback has been used. However it does not provide any status to the
- transaction and will be deprecated. The result structure defined as
- dmaengine_result that is passed in to callback_result has two fields:
- + result: This provides the transfer result defined by
- dmaengine_tx_result. Either success or some error
- condition.
- + residue: Provides the residue bytes of the transfer for those that
- support residue.
-
- * device_issue_pending
- - Takes the first transaction descriptor in the pending queue,
- and starts the transfer. Whenever that transfer is done, it
- should move to the next transaction in the list.
- - This function can be called in an interrupt context
-
- * device_tx_status
- - Should report the bytes left to go over on the given channel
- - Should only care about the transaction descriptor passed as
- argument, not the currently active one on a given channel
- - The tx_state argument might be NULL
- - Should use dma_set_residue to report it
- - In the case of a cyclic transfer, it should only take into
- account the current period.
- - This function can be called in an interrupt context.
-
- * device_config
- - Reconfigures the channel with the configuration given as
- argument
- - This command should NOT perform synchronously, or on any
- currently queued transfers, but only on subsequent ones
- - In this case, the function will receive a dma_slave_config
- structure pointer as an argument, that will detail which
- configuration to use.
- - Even though that structure contains a direction field, this
- field is deprecated in favor of the direction argument given to
- the prep_* functions
- - This call is mandatory for slave operations only. This should NOT be
- set or expected to be set for memcpy operations.
- If a driver support both, it should use this call for slave
- operations only and not for memcpy ones.
-
- * device_pause
- - Pauses a transfer on the channel
- - This command should operate synchronously on the channel,
- pausing right away the work of the given channel
-
- * device_resume
- - Resumes a transfer on the channel
- - This command should operate synchronously on the channel,
- resuming right away the work of the given channel
-
- * device_terminate_all
- - Aborts all the pending and ongoing transfers on the channel
- - For aborted transfers the complete callback should not be called
- - Can be called from atomic context or from within a complete
- callback of a descriptor. Must not sleep. Drivers must be able
- to handle this correctly.
- - Termination may be asynchronous. The driver does not have to
- wait until the currently active transfer has completely stopped.
- See device_synchronize.
-
- * device_synchronize
- - Must synchronize the termination of a channel to the current
- context.
- - Must make sure that memory for previously submitted
- descriptors is no longer accessed by the DMA controller.
- - Must make sure that all complete callbacks for previously
- submitted descriptors have finished running and none are
- scheduled to run.
- - May sleep.
-
-
-Misc notes (stuff that should be documented, but don't really know
-where to put them)
-------------------------------------------------------------------
- * dma_run_dependencies
- - Should be called at the end of an async TX transfer, and can be
- ignored in the slave transfers case.
- - Makes sure that dependent operations are run before marking it
- as complete.
-
- * dma_cookie_t
- - it's a DMA transaction ID that will increment over time.
- - Not really relevant any more since the introduction of virt-dma
- that abstracts it away.
-
- * DMA_CTRL_ACK
- - If clear, the descriptor cannot be reused by provider until the
- client acknowledges receipt, i.e. has has a chance to establish any
- dependency chains
- - This can be acked by invoking async_tx_ack()
- - If set, does not mean descriptor can be reused
-
- * DMA_CTRL_REUSE
- - If set, the descriptor can be reused after being completed. It should
- not be freed by provider if this flag is set.
- - The descriptor should be prepared for reuse by invoking
- dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
- - dmaengine_desc_set_reuse() will succeed only when channel support
- reusable descriptor as exhibited by capabilities
- - As a consequence, if a device driver wants to skip the dma_map_sg() and
- dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
- it can resubmit the transfer right after its completion.
- - Descriptor can be freed in few ways
- - Clearing DMA_CTRL_REUSE by invoking dmaengine_desc_clear_reuse()
- and submitting for last txn
- - Explicitly invoking dmaengine_desc_free(), this can succeed only
- when DMA_CTRL_REUSE is already set
- - Terminating the channel
-
- * DMA_PREP_CMD
- - If set, the client driver tells DMA controller that passed data in DMA
- API is command data.
- - Interpretation of command data is DMA controller specific. It can be
- used for issuing commands to other peripherals/register reads/register
- writes for which the descriptor should be in different format from
- normal data descriptors.
-
-General Design Notes
---------------------
-
-Most of the DMAEngine drivers you'll see are based on a similar design
-that handles the end of transfer interrupts in the handler, but defer
-most work to a tasklet, including the start of a new transfer whenever
-the previous transfer ended.
-
-This is a rather inefficient design though, because the inter-transfer
-latency will be not only the interrupt latency, but also the
-scheduling latency of the tasklet, which will leave the channel idle
-in between, which will slow down the global transfer rate.
-
-You should avoid this kind of practice, and instead of electing a new
-transfer in your tasklet, move that part to the interrupt handler in
-order to have a shorter idle window (that we can't really avoid
-anyway).
-
-Glossary
---------
-
-Burst: A number of consecutive read or write operations
- that can be queued to buffers before being flushed to
- memory.
-Chunk: A contiguous collection of bursts
-Transfer: A collection of chunks (be it contiguous or not)
diff --git a/Documentation/dmaengine/pxa_dma.txt b/Documentation/dmaengine/pxa_dma.txt
deleted file mode 100644
index 0736d44b5438..000000000000
--- a/Documentation/dmaengine/pxa_dma.txt
+++ /dev/null
@@ -1,153 +0,0 @@
-PXA/MMP - DMA Slave controller
-==============================
-
-Constraints
------------
- a) Transfers hot queuing
- A driver submitting a transfer and issuing it should be granted the transfer
- is queued even on a running DMA channel.
- This implies that the queuing doesn't wait for the previous transfer end,
- and that the descriptor chaining is not only done in the irq/tasklet code
- triggered by the end of the transfer.
- A transfer which is submitted and issued on a phy doesn't wait for a phy to
- stop and restart, but is submitted on a "running channel". The other
- drivers, especially mmp_pdma waited for the phy to stop before relaunching
- a new transfer.
-
- b) All transfers having asked for confirmation should be signaled
- Any issued transfer with DMA_PREP_INTERRUPT should trigger a callback call.
- This implies that even if an irq/tasklet is triggered by end of tx1, but
- at the time of irq/dma tx2 is already finished, tx1->complete() and
- tx2->complete() should be called.
-
- c) Channel running state
- A driver should be able to query if a channel is running or not. For the
- multimedia case, such as video capture, if a transfer is submitted and then
- a check of the DMA channel reports a "stopped channel", the transfer should
- not be issued until the next "start of frame interrupt", hence the need to
- know if a channel is in running or stopped state.
-
- d) Bandwidth guarantee
- The PXA architecture has 4 levels of DMAs priorities : high, normal, low.
- The high priorities get twice as much bandwidth as the normal, which get twice
- as much as the low priorities.
- A driver should be able to request a priority, especially the real-time
- ones such as pxa_camera with (big) throughputs.
-
-Design
-------
- a) Virtual channels
- Same concept as in sa11x0 driver, ie. a driver was assigned a "virtual
- channel" linked to the requestor line, and the physical DMA channel is
- assigned on the fly when the transfer is issued.
-
- b) Transfer anatomy for a scatter-gather transfer
- +------------+-----+---------------+----------------+-----------------+
- | desc-sg[0] | ... | desc-sg[last] | status updater | finisher/linker |
- +------------+-----+---------------+----------------+-----------------+
-
- This structure is pointed by dma->sg_cpu.
- The descriptors are used as follows :
- - desc-sg[i]: i-th descriptor, transferring the i-th sg
- element to the video buffer scatter gather
- - status updater
- Transfers a single u32 to a well known dma coherent memory to leave
- a trace that this transfer is done. The "well known" is unique per
- physical channel, meaning that a read of this value will tell which
- is the last finished transfer at that point in time.
- - finisher: has ddadr=DADDR_STOP, dcmd=ENDIRQEN
- - linker: has ddadr= desc-sg[0] of next transfer, dcmd=0
-
- c) Transfers hot-chaining
- Suppose the running chain is :
- Buffer 1 Buffer 2
- +---------+----+---+ +----+----+----+---+
- | d0 | .. | dN | l | | d0 | .. | dN | f |
- +---------+----+-|-+ ^----+----+----+---+
- | |
- +----+
-
- After a call to dmaengine_submit(b3), the chain will look like :
- Buffer 1 Buffer 2 Buffer 3
- +---------+----+---+ +----+----+----+---+ +----+----+----+---+
- | d0 | .. | dN | l | | d0 | .. | dN | l | | d0 | .. | dN | f |
- +---------+----+-|-+ ^----+----+----+-|-+ ^----+----+----+---+
- | | | |
- +----+ +----+
- new_link
-
- If while new_link was created the DMA channel stopped, it is _not_
- restarted. Hot-chaining doesn't break the assumption that
- dma_async_issue_pending() is to be used to ensure the transfer is actually started.
-
- One exception to this rule :
- - if Buffer1 and Buffer2 had all their addresses 8 bytes aligned
- - and if Buffer3 has at least one address not 4 bytes aligned
- - then hot-chaining cannot happen, as the channel must be stopped, the
- "align bit" must be set, and the channel restarted As a consequence,
- such a transfer tx_submit() will be queued on the submitted queue, and
- this specific case if the DMA is already running in aligned mode.
-
- d) Transfers completion updater
- Each time a transfer is completed on a channel, an interrupt might be
- generated or not, up to the client's request. But in each case, the last
- descriptor of a transfer, the "status updater", will write the latest
- transfer being completed into the physical channel's completion mark.
-
- This will speed up residue calculation, for large transfers such as video
- buffers which hold around 6k descriptors or more. This also allows without
- any lock to find out what is the latest completed transfer in a running
- DMA chain.
-
- e) Transfers completion, irq and tasklet
- When a transfer flagged as "DMA_PREP_INTERRUPT" is finished, the dma irq
- is raised. Upon this interrupt, a tasklet is scheduled for the physical
- channel.
- The tasklet is responsible for :
- - reading the physical channel last updater mark
- - calling all the transfer callbacks of finished transfers, based on
- that mark, and each transfer flags.
- If a transfer is completed while this handling is done, a dma irq will
- be raised, and the tasklet will be scheduled once again, having a new
- updater mark.
-
- f) Residue
- Residue granularity will be descriptor based. The issued but not completed
- transfers will be scanned for all of their descriptors against the
- currently running descriptor.
-
- g) Most complicated case of driver's tx queues
- The most tricky situation is when :
- - there are not "acked" transfers (tx0)
- - a driver submitted an aligned tx1, not chained
- - a driver submitted an aligned tx2 => tx2 is cold chained to tx1
- - a driver issued tx1+tx2 => channel is running in aligned mode
- - a driver submitted an aligned tx3 => tx3 is hot-chained
- - a driver submitted an unaligned tx4 => tx4 is put in submitted queue,
- not chained
- - a driver issued tx4 => tx4 is put in issued queue, not chained
- - a driver submitted an aligned tx5 => tx5 is put in submitted queue, not
- chained
- - a driver submitted an aligned tx6 => tx6 is put in submitted queue,
- cold chained to tx5
-
- This translates into (after tx4 is issued) :
- - issued queue
- +-----+ +-----+ +-----+ +-----+
- | tx1 | | tx2 | | tx3 | | tx4 |
- +---|-+ ^---|-+ ^-----+ +-----+
- | | | |
- +---+ +---+
- - submitted queue
- +-----+ +-----+
- | tx5 | | tx6 |
- +---|-+ ^-----+
- | |
- +---+
- - completed queue : empty
- - allocated queue : tx0
-
- It should be noted that after tx3 is completed, the channel is stopped, and
- restarted in "unaligned mode" to handle tx4.
-
-Author: Robert Jarzmik <robert.jarzmik@free.fr>
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index b24854b5d6be..0268335414ce 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -65,7 +65,7 @@ Without options, the kernel-doc directive includes all documentation comments
from the source file.
The kernel-doc extension is included in the kernel source tree, at
-``Documentation/sphinx/kernel-doc.py``. Internally, it uses the
+``Documentation/sphinx/kerneldoc.py``. Internally, it uses the
``scripts/kernel-doc`` script to extract the documentation comments from the
source.
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
new file mode 100644
index 000000000000..6245c99af8c1
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -0,0 +1,275 @@
+====================
+DMA Engine API Guide
+====================
+
+Vinod Koul <vinod dot koul at intel.com>
+
+.. note:: For DMA Engine usage in async_tx please see:
+ ``Documentation/crypto/async-tx-api.txt``
+
+
+Below is a guide for device driver writers on how to use the Slave-DMA API of the
+DMA Engine. This is applicable only for slave DMA usage.
+
+DMA usage
+=========
+
+The slave DMA usage consists of the following steps:
+
+- Allocate a DMA slave channel
+
+- Set slave and controller specific parameters
+
+- Get a descriptor for transaction
+
+- Submit the transaction
+
+- Issue pending requests and wait for callback notification
+
+The details of these operations are:
+
+1. Allocate a DMA slave channel
+
+ Channel allocation is slightly different in the slave DMA context:
+ client drivers typically need a channel from a particular DMA
+ controller only, and in some cases even a specific channel is desired.
+ To request a channel, the dma_request_chan() API is used.
+
+ Interface:
+
+ .. code-block:: c
+
+ struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+
+ This will find and return the ``name`` DMA channel associated with the ``dev``
+ device. The association is done via DT, ACPI or a board file based
+ dma_slave_map matching table.
+
+ A channel allocated via this interface is exclusive to the caller,
+ until dma_release_channel() is called.
+
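+ As an illustration, a minimal sketch of requesting and releasing a channel
+ (the ``dev`` pointer and the "rx" channel name are hypothetical):
+
+ .. code-block:: c
+
+    struct dma_chan *chan;
+
+    /* name as wired up in DT, ACPI or the board dma_slave_map table */
+    chan = dma_request_chan(dev, "rx");         /* "rx" is a made-up name */
+    if (IS_ERR(chan))
+        return PTR_ERR(chan);                   /* no such channel, or -EPROBE_DEFER */
+
+    /* ... configure and use the channel ... */
+
+    dma_release_channel(chan);                  /* hand the exclusive channel back */
+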
+2. Set slave and controller specific parameters
+
+ The next step is always to pass some specific information to the DMA
+ driver. Most of the generic information which a slave DMA can use
+ is in struct dma_slave_config. This allows the clients to specify
+ DMA direction, DMA addresses, bus widths, DMA burst lengths etc.
+ for the peripheral.
+
+ If a DMA controller has more parameters to be sent, it should try to
+ embed struct dma_slave_config in its controller specific structure.
+ That gives the client the flexibility to pass more parameters, if
+ required.
+
+ Interface:
+
+ .. code-block:: c
+
+ int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+
+ Please see the dma_slave_config structure definition in dmaengine.h
+ for a detailed explanation of the struct members. Please note
+ that the 'direction' member will be going away as it duplicates the
+ direction given in the prepare call.
+
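+ For instance, a hedged sketch of configuring a channel that writes to a
+ peripheral FIFO (the address, width and burst values are made up):
+
+ .. code-block:: c
+
+    struct dma_slave_config cfg = { };
+    int ret;
+
+    cfg.dst_addr = fifo_phys_addr;                   /* hypothetical FIFO address */
+    cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* FIFO register width */
+    cfg.dst_maxburst = 8;                            /* words per burst */
+
+    ret = dmaengine_slave_config(chan, &cfg);
+    if (ret)
+        return ret;          /* the controller refused this configuration */
+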
+3. Get a descriptor for transaction
+
+ For slave usage the various modes of slave transfers supported by the
+ DMA-engine are:
+
+ - slave_sg: DMA a list of scatter gather buffers from/to a peripheral
+
+ - dma_cyclic: Perform a cyclic DMA operation from/to a peripheral until the
+ operation is explicitly stopped.
+
+ - interleaved_dma: This is common to Slave as well as M2M clients. For slave
+ usage, the address of the device's FIFO may already be known to the driver.
+ Various types of operations can be expressed by setting
+ appropriate values in the 'dma_interleaved_template' members.
+
+ A non-NULL return of this transfer API represents a "descriptor" for
+ the given transaction.
+
+ Interface:
+
+ .. code-block:: c
+
+ struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags);
+
+ struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction);
+
+ struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+
+ The peripheral driver is expected to have mapped the scatterlist for
+ the DMA operation prior to calling dmaengine_prep_slave_sg(), and must
+ keep the scatterlist mapped until the DMA operation has completed.
+ The scatterlist must be mapped using the DMA struct device.
+ If a mapping needs to be synchronized later, dma_sync_*_for_*() must be
+ called using the DMA struct device, too.
+ So, normal setup should look like this:
+
+ .. code-block:: c
+
+ nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len);
+ if (nr_sg == 0)
+ /* error */
+
+ desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
+
+ Once a descriptor has been obtained, the callback information can be
+ added and the descriptor must then be submitted. Some DMA engine
+ drivers may hold a spinlock between a successful preparation and
+ submission so it is important that these two operations are closely
+ paired.
+
+ .. note::
+
+ Although the async_tx API specifies that completion callback
+ routines cannot submit any new operations, this is not the
+ case for slave/cyclic DMA.
+
+ For slave DMA, the subsequent transaction may not be available
+ for submission prior to callback function being invoked, so
+ slave DMA callbacks are permitted to prepare and submit a new
+ transaction.
+
+ For cyclic DMA, a callback function may wish to terminate the
+ DMA via dmaengine_terminate_async().
+
+ Therefore, it is important that DMA engine drivers drop any
+ locks before calling the callback function which may cause a
+ deadlock.
+
+ Note that callbacks will always be invoked from the DMA
+ engine's tasklet, never from interrupt context.
+
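+ A hedged sketch of attaching the completion callback to a descriptor obtained
+ above (``my_dma_done`` and the ``md`` driver data are hypothetical):
+
+ .. code-block:: c
+
+    static void my_dma_done(void *param)
+    {
+        struct my_dev *md = param;      /* hypothetical driver structure */
+
+        complete(&md->dma_done);        /* e.g. wake up a waiting thread */
+    }
+
+    /* after dmaengine_prep_slave_sg() returned a non-NULL desc: */
+    desc->callback = my_dma_done;
+    desc->callback_param = md;
+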
+4. Submit the transaction
+
+ Once the descriptor has been prepared and the callback information
+ added, it must be placed on the DMA engine driver's pending queue.
+
+ Interface:
+
+ .. code-block:: c
+
+ dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+
+ This returns a cookie that can be used to check the progress of DMA engine
+ activity via other DMA engine calls not covered in this document.
+
+ dmaengine_submit() will not start the DMA operation, it merely adds
+ it to the pending queue. For this, see step 5, dma_async_issue_pending.
+
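+ A minimal sketch of submitting the descriptor and checking the returned
+ cookie (the error value chosen is only illustrative):
+
+ .. code-block:: c
+
+    dma_cookie_t cookie;
+
+    cookie = dmaengine_submit(desc);
+    if (dma_submit_error(cookie))
+        return -EIO;         /* descriptor was not accepted, do not issue it */
+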
+5. Issue pending DMA requests and wait for callback notification
+
+ The transactions in the pending queue can be activated by calling the
+ issue_pending API. If the channel is idle then the first transaction in
+ the queue is started and subsequent ones are queued up.
+
+ On completion of each DMA operation, the next in queue is started and
+ a tasklet triggered. The tasklet will then call the client driver
+ completion callback routine for notification, if set.
+
+ Interface:
+
+ .. code-block:: c
+
+ void dma_async_issue_pending(struct dma_chan *chan);
+
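+ Putting it together, a hedged sketch that kicks off the queued work and waits
+ for the hypothetical completion signalled from the callback sketched in step 3:
+
+ .. code-block:: c
+
+    dma_async_issue_pending(chan);
+
+    /* md->dma_done is the hypothetical completion from the step 3 sketch */
+    if (!wait_for_completion_timeout(&md->dma_done,
+                                     msecs_to_jiffies(1000)))
+        dmaengine_terminate_sync(chan);    /* timed out, stop the channel */
+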
+Further APIs:
+-------------
+
+1. Terminate APIs
+
+ .. code-block:: c
+
+ int dmaengine_terminate_sync(struct dma_chan *chan)
+ int dmaengine_terminate_async(struct dma_chan *chan)
+ int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */
+
+ This causes all activity for the DMA channel to be stopped, and may
+ discard data in the DMA FIFO which hasn't been fully transferred.
+ No callback functions will be called for any incomplete transfers.
+
+ Two variants of this function are available.
+
+ dmaengine_terminate_async() might not wait until the DMA has been fully
+ stopped or until any running complete callbacks have finished. But it is
+ possible to call dmaengine_terminate_async() from atomic context or from
+ within a complete callback. dmaengine_synchronize() must be called before it
+ is safe to free the memory accessed by the DMA transfer or free resources
+ accessed from within the complete callback.
+
+ dmaengine_terminate_sync() will wait for the transfer and any running
+ complete callbacks to finish before it returns. But the function must not be
+ called from atomic context or from within a complete callback.
+
+ dmaengine_terminate_all() is deprecated and should not be used in new code.
+
+2. Pause API
+
+ .. code-block:: c
+
+ int dmaengine_pause(struct dma_chan *chan)
+
+ This pauses activity on the DMA channel without data loss.
+
+3. Resume API
+
+ .. code-block:: c
+
+ int dmaengine_resume(struct dma_chan *chan)
+
+ Resume a previously paused DMA channel. It is invalid to resume a
+ channel which is not currently paused.
+
+4. Check Txn complete
+
+ .. code-block:: c
+
+ enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+
+ This can be used to check the status of the channel. Please see
+ the documentation in include/linux/dmaengine.h for a more complete
+ description of this API.
+
+ This can be used in conjunction with dma_async_is_complete() and
+ the cookie returned from dmaengine_submit() to check for
+ completion of a specific DMA transaction.
+
+ .. note::
+
+ Not all DMA engine drivers can return reliable information for
+ a running DMA channel. It is recommended that DMA engine users
+ pause or stop (via dmaengine_terminate_all()) the channel before
+ using this API.
+
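+ For example, a hedged sketch of polling the status of a previously submitted
+ transaction identified by ``cookie`` (``dev`` is a hypothetical device pointer):
+
+ .. code-block:: c
+
+    enum dma_status status;
+
+    status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+    if (status == DMA_COMPLETE)
+        dev_dbg(dev, "cookie %d completed\n", cookie);  /* dev is hypothetical */
+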
+5. Synchronize termination API
+
+ .. code-block:: c
+
+ void dmaengine_synchronize(struct dma_chan *chan)
+
+ Synchronize the termination of the DMA channel to the current context.
+
+ This function should be used after dmaengine_terminate_async() to synchronize
+ the termination of the DMA channel to the current context. The function will
+ wait for the transfer and any running complete callbacks to finish before it
+ returns.
+
+ If dmaengine_terminate_async() is used to stop the DMA channel this function
+ must be called before it is safe to free memory accessed by previously
+ submitted descriptors or to free any resources accessed within the complete
+ callback of previously submitted descriptors.
+
+ The behavior of this function is undefined if dma_async_issue_pending() has
+ been called between dmaengine_terminate_async() and this function.
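+
+ A typical teardown sequence therefore looks like this sketch:
+
+ .. code-block:: c
+
+    /* from atomic context or a completion callback */
+    dmaengine_terminate_async(chan);
+
+    /* later, from sleepable context, before freeing buffers or driver data */
+    dmaengine_synchronize(chan);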
diff --git a/Documentation/dmaengine/dmatest.txt b/Documentation/driver-api/dmaengine/dmatest.rst
index fb683c72dea8..3922c0a3f0c0 100644
--- a/Documentation/dmaengine/dmatest.txt
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -1,11 +1,13 @@
- DMA Test Guide
- ==============
+==============
+DMA Test Guide
+==============
- Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Andy Shevchenko <andriy.shevchenko@linux.intel.com>
This small document introduces how to test DMA drivers using dmatest module.
- Part 1 - How to build the test module
+Part 1 - How to build the test module
+=====================================
The menuconfig contains an option that could be found by following path:
Device Drivers -> DMA Engine support -> DMA Test client
@@ -13,25 +15,31 @@ The menuconfig contains an option that could be found by following path:
In the configuration file the option called CONFIG_DMATEST. The dmatest could
be built as module or inside kernel. Let's consider those cases.
- Part 2 - When dmatest is built as a module...
+Part 2 - When dmatest is built as a module
+==========================================
-Example of usage:
- % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
+Example of usage: ::
-...or:
- % modprobe dmatest
- % echo dma0chan0 > /sys/module/dmatest/parameters/channel
- % echo 2000 > /sys/module/dmatest/parameters/timeout
- % echo 1 > /sys/module/dmatest/parameters/iterations
- % echo 1 > /sys/module/dmatest/parameters/run
+ % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
-...or on the kernel command line:
+...or: ::
- dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
+ % modprobe dmatest
+ % echo dma0chan0 > /sys/module/dmatest/parameters/channel
+ % echo 2000 > /sys/module/dmatest/parameters/timeout
+ % echo 1 > /sys/module/dmatest/parameters/iterations
+ % echo 1 > /sys/module/dmatest/parameters/run
-Hint: available channel list could be extracted by running the following
-command:
- % ls -1 /sys/class/dma/
+...or on the kernel command line: ::
+
+ dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
+
+.. hint:: the list of available channels can be extracted by running the
+   following command:
+
+::
+
+ % ls -1 /sys/class/dma/
Once started a message like "dmatest: Started 1 threads using dma0chan0" is
emitted. After that only test failure messages are reported until the test
@@ -39,8 +47,9 @@ stops.
Note that running a new test will not stop any in progress test.
-The following command returns the state of the test.
- % cat /sys/module/dmatest/parameters/run
+The following command returns the state of the test. ::
+
+ % cat /sys/module/dmatest/parameters/run
To wait for test completion userpace can poll 'run' until it is false, or use
the wait parameter. Specifying 'wait=1' when loading the module causes module
@@ -50,15 +59,19 @@ before returning. For example, the following scripts wait for 42 tests
to complete before exiting. Note that if 'iterations' is set to 'infinite' then
waiting is disabled.
-Example:
- % modprobe dmatest run=1 iterations=42 wait=1
- % modprobe -r dmatest
-...or:
- % modprobe dmatest run=1 iterations=42
- % cat /sys/module/dmatest/parameters/wait
- % modprobe -r dmatest
+Example: ::
+
+ % modprobe dmatest run=1 iterations=42 wait=1
+ % modprobe -r dmatest
- Part 3 - When built-in in the kernel...
+...or: ::
+
+ % modprobe dmatest run=1 iterations=42
+ % cat /sys/module/dmatest/parameters/wait
+ % modprobe -r dmatest
+
+Part 3 - When built-in in the kernel
+====================================
The module parameters that is supplied to the kernel command line will be used
for the first performed test. After user gets a control, the test could be
@@ -66,27 +79,32 @@ re-run with the same or different parameters. For the details see the above
section "Part 2 - When dmatest is built as a module..."
In both cases the module parameters are used as the actual values for the test
-case. You always could check them at run-time by running
- % grep -H . /sys/module/dmatest/parameters/*
+case. You always could check them at run-time by running ::
+
+ % grep -H . /sys/module/dmatest/parameters/*
- Part 4 - Gathering the test results
+Part 4 - Gathering the test results
+===================================
-Test results are printed to the kernel log buffer with the format:
+Test results are printed to the kernel log buffer with the format: ::
-"dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
+ "dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
-Example of output:
- % dmesg | tail -n 1
- dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
+Example of output: ::
+
+
+ % dmesg | tail -n 1
+ dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
The message format is unified across the different types of errors. A number in
the parens represents additional information, e.g. error code, error counter,
or status. A test thread also emits a summary line at completion listing the
number of tests executed, number that failed, and a result code.
-Example:
- % dmesg | tail -n 1
- dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
+Example: ::
+
+ % dmesg | tail -n 1
+ dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
The details of a data miscompare error are also emitted, but do not follow the
above format.
diff --git a/Documentation/driver-api/dmaengine/index.rst b/Documentation/driver-api/dmaengine/index.rst
new file mode 100644
index 000000000000..3026fa975937
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/index.rst
@@ -0,0 +1,55 @@
+=======================
+DMAEngine documentation
+=======================
+
+DMAEngine documentation provides documents for various aspects of the DMAEngine
+framework.
+
+DMAEngine documentation
+-----------------------
+
+This book covers the DMAEngine internal APIs and is a guide for DMAEngine
+device driver writers.
+
+.. toctree::
+ :maxdepth: 1
+
+ provider
+
+DMAEngine client documentation
+------------------------------
+
+This book is a guide for device driver writers on how to use the Slave-DMA
+API of the DMAEngine. It is applicable only for slave DMA usage.
+
+.. toctree::
+ :maxdepth: 1
+
+ client
+
+DMA Test documentation
+----------------------
+
+This book introduces how to test DMA drivers using the dmatest module.
+
+.. toctree::
+ :maxdepth: 1
+
+ dmatest
+
+PXA DMA documentation
+----------------------
+
+This book adds some notes about the PXA DMA controller.
+
+.. toctree::
+ :maxdepth: 1
+
+ pxa_dma
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
new file mode 100644
index 000000000000..814acb4d2294
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -0,0 +1,508 @@
+==================================
+DMAengine controller documentation
+==================================
+
+Hardware Introduction
+=====================
+
+Most of the Slave DMA controllers have the same general principles of
+operation.
+
+They have a given number of channels to use for the DMA transfers, and
+a given number of request lines.
+
+Requests and channels are pretty much orthogonal. Channels can be used
+to serve several, or even all, requests. To simplify, channels are the
+entities that will be doing the copy, and requests define which endpoints
+are involved.
+
+The request lines actually correspond to physical lines going from the
+DMA-eligible devices to the controller itself. Whenever the device
+wants to start a transfer, it asserts a DMA request (DRQ) by asserting
+that request line.
+
+A very simple DMA controller would only take into account a single
+parameter: the transfer size. At each clock cycle, it would transfer a
+byte of data from one buffer to another, until the transfer size has
+been reached.
+
+That wouldn't work well in the real world, since slave devices might
+require a specific number of bits to be transferred in a single
+cycle. For example, we may want to transfer as much data as the
+physical bus allows to maximize performance when doing a simple
+memory copy operation, but our audio device could have a narrower FIFO
+that requires data to be written exactly 16 or 24 bits at a time. This
+is why most if not all of the DMA controllers can adjust this, using a
+parameter called the transfer width.
+
+Moreover, some DMA controllers, whenever the RAM is used as a source
+or destination, can group the reads or writes in memory into a buffer,
+so instead of having a lot of small memory accesses, which is not
+really efficient, you'll get several bigger transfers. This is done
+using a parameter called the burst size, which defines how many single
+reads/writes the controller is allowed to do in a row without splitting
+the transfer into smaller sub-transfers.
+
+Our theoretical DMA controller would then only be able to do transfers
+that involve a single contiguous block of data. However, some of the
+transfers we usually need are not contiguous, and involve copying data
+from non-contiguous buffers to a contiguous buffer; this is called
+scatter-gather.
+
+DMAEngine, at least for mem2dev transfers, requires support for
+scatter-gather. So we're left with two cases here: either we have a
+quite simple DMA controller that doesn't support it, and we'll have to
+implement it in software, or we have a more advanced DMA controller
+that implements scatter-gather in hardware.
+
+The latter are usually programmed using a collection of chunks to
+transfer, and whenever the transfer is started, the controller will go
+over that collection, doing whatever we programmed there.
+
+This collection is usually either a table or a linked list. You will
+then push either the address of the table and its number of elements,
+or the first item of the list to one channel of the DMA controller,
+and whenever a DRQ is asserted, it will go through the collection
+to know where to fetch the data from.
+
+Either way, the format of this collection is completely dependent on
+your hardware. Each DMA controller will require a different structure,
+but all of them will require, for every chunk, at least the source and
+destination addresses, whether it should increment these addresses or
+not, and the three parameters we saw earlier: the burst size, the
+transfer width and the transfer size.
+
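+As an illustration only (no real controller uses these exact names or
+layout), one chunk of such a collection could be described by a
+hypothetical hardware descriptor along these lines::
+
+  /* Hypothetical layout of one chunk descriptor, for illustration only. */
+  struct foo_hw_desc {
+          u32 src_addr;   /* source address of the chunk */
+          u32 dst_addr;   /* destination address of the chunk */
+          u32 len;        /* transfer size, in bytes */
+          u32 cfg;        /* encodes burst size, transfer width and
+                           * whether src/dst addresses increment */
+          u32 next_desc;  /* bus address of the next chunk, or 0 to stop */
+  };
+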
+One last thing: usually, slave devices won't issue a DRQ by default,
+and you have to enable this in your slave device driver first whenever
+you want to use DMA.
+
+These were just the general memory-to-memory (also called mem2mem) or
+memory-to-device (mem2dev) kinds of transfers. Most devices also
+support other kinds of transfers or memory operations that dmaengine
+supports; these will be detailed later in this document.
+
+DMA Support in Linux
+====================
+
+Historically, DMA controller drivers have been implemented using the
+async TX API, to offload operations such as memory copy, XOR,
+cryptography, etc., basically any memory to memory operation.
+
+Over time, the need for memory to device transfers arose, and
+dmaengine was extended. Nowadays, the async TX API is written as a
+layer on top of dmaengine, and acts as a client. Still, dmaengine
+accommodates that API in some cases, and made some design choices to
+ensure that it stayed compatible.
+
+For more information on the Async TX API, please refer to the relevant
+documentation file in Documentation/crypto/async-tx-api.txt.
+
+DMAEngine APIs
+==============
+
+``struct dma_device`` Initialization
+------------------------------------
+
+Just like any other kernel framework, the whole DMAEngine registration
+relies on the driver filling a structure and registering against the
+framework. In our case, that structure is dma_device.
+
+The first thing you need to do in your driver is to allocate this
+structure. Any of the usual memory allocators will do, but you'll also
+need to initialize a few fields in there (a minimal sketch follows the
+list below):
+
+- channels: should be initialized as a list using the
+ INIT_LIST_HEAD macro for example
+
+- src_addr_widths:
+ should contain a bitmask of the supported source transfer width
+
+- dst_addr_widths:
+ should contain a bitmask of the supported destination transfer width
+
+- directions:
+ should contain a bitmask of the supported slave directions
+ (i.e. excluding mem2mem transfers)
+
+- residue_granularity:
+
+ - Granularity of the transfer residue reported to dma_set_residue.
+ This can be either:
+
+ - Descriptor
+
+ - Your device doesn't support any kind of residue
+ reporting. The framework will only know that a particular
+ transaction descriptor is done.
+
+ - Segment
+
+ - Your device is able to report which chunks have been transferred
+
+ - Burst
+
+ - Your device is able to report which bursts have been transferred
+
+- dev: should hold the pointer to the ``struct device`` associated
+ with your current driver instance.
+
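+As a minimal sketch of this initialization (assuming a platform device
+``pdev`` in the driver's probe function; none of this is mandated by the
+framework beyond the fields described above)::
+
+  struct dma_device *dd;
+
+  dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
+  if (!dd)
+          return -ENOMEM;
+
+  INIT_LIST_HEAD(&dd->channels);
+  dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+                        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+  dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+                        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+  dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+  dd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+  dd->dev = &pdev->dev;
+
+The capabilities and callbacks described in the following sections are
+then filled in before registering the device with
+dma_async_device_register().
+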
+Supported transaction types
+---------------------------
+
+The next thing you need is to set which transaction types your device
+(and driver) supports.
+
+Our ``dma_device`` structure has a field called cap_mask that holds the
+various types of transactions supported, and you need to modify this
+mask using the dma_cap_set function, passing the flags matching the
+transaction types you support as arguments.
+
+All those capabilities are defined in the ``dma_transaction_type`` enum,
+in ``include/linux/dmaengine.h``.
+
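+For example, a controller driver supporting both slave and memcpy
+transfers might set its capabilities like this (a sketch reusing the
+hypothetical ``dd`` pointer from the previous example)::
+
+  dma_cap_zero(dd->cap_mask);
+  dma_cap_set(DMA_SLAVE, dd->cap_mask);
+  dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+  dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+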
+Currently, the types available are:
+
+- DMA_MEMCPY
+
+ - The device is able to do memory to memory copies
+
+- DMA_XOR
+
+ - The device is able to perform XOR operations on memory areas
+
+ - Used to accelerate XOR intensive tasks, such as RAID5
+
+- DMA_XOR_VAL
+
+ - The device is able to perform parity check using the XOR
+ algorithm against a memory buffer.
+
+- DMA_PQ
+
+ - The device is able to perform RAID6 P+Q computations, P being a
+ simple XOR, and Q being a Reed-Solomon algorithm.
+
+- DMA_PQ_VAL
+
+ - The device is able to perform parity check using RAID6 P+Q
+ algorithm against a memory buffer.
+
+- DMA_INTERRUPT
+
+ - The device is able to trigger a dummy transfer that will
+ generate periodic interrupts
+
+ - Used by the client drivers to register a callback that will be
+ called on a regular basis through the DMA controller interrupt
+
+- DMA_PRIVATE
+
+ - The device only supports slave transfers, and as such isn't
+ available for async transfers.
+
+- DMA_ASYNC_TX
+
+ - Must not be set by the device, and will be set by the framework
+ if needed
+
+ - TODO: What is it about?
+
+- DMA_SLAVE
+
+ - The device can handle device to memory transfers, including
+ scatter-gather transfers.
+
+ - While in the mem2mem case we had two distinct types to
+ deal with a single chunk to copy or a collection of them, here,
+ we just have a single transaction type that is supposed to
+ handle both.
+
+ - If you want to transfer a single contiguous memory buffer,
+ simply build a scatter list with only one item.
+
+- DMA_CYCLIC
+
+ - The device can handle cyclic transfers.
+
+ - A cyclic transfer is a transfer where the chunk collection will
+ loop over itself, with the last item pointing to the first.
+
+ - It's usually used for audio transfers, where you want to operate
+ on a single ring buffer that you will fill with your audio data.
+
+- DMA_INTERLEAVE
+
+ - The device supports interleaved transfer.
+
+ - These transfers can transfer data from a non-contiguous buffer
+ to a non-contiguous buffer, as opposed to DMA_SLAVE, which can only
+ transfer data from a non-contiguous data set to a contiguous
+ destination buffer.
+
+ - It's usually used for 2d content transfers, in which case you
+ want to transfer a portion of uncompressed data directly to the
+ display.
+
+These various types will also affect how the source and destination
+addresses change over time.
+
+Addresses pointing to RAM are typically incremented (or decremented)
+after each transfer. In case of a ring buffer, they may loop
+(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
+are typically fixed.
+
+Device operations
+-----------------
+
+Our dma_device structure also requires a few function pointers in
+order to implement the actual logic, now that we have described the
+operations we are able to perform.
+
+The functions that we have to fill in there, and hence have to
+implement, obviously depend on the transaction types you reported as
+supported; a minimal callback sketch follows the list below.
+
+- ``device_alloc_chan_resources``
+
+- ``device_free_chan_resources``
+
+ - These functions will be called whenever a driver calls
+ ``dma_request_channel`` or ``dma_release_channel`` for the first/last
+ time on the channel associated to that driver.
+
+ - They are in charge of allocating/freeing all the needed
+ resources in order for that channel to be useful for your driver.
+
+ - These functions can sleep.
+
+- ``device_prep_dma_*``
+
+ - These functions match the capabilities you registered
+ previously.
+
+ - These functions all take the buffer or the scatterlist relevant
+ for the transfer being prepared, and should create a hardware
+ descriptor or a list of hardware descriptors from it
+
+ - These functions can be called from an interrupt context
+
+ - Any allocation you might do should use the GFP_NOWAIT
+ flag, in order not to potentially sleep, but without depleting
+ the emergency pool either.
+
+ - Drivers should try to pre-allocate any memory they might need
+ during the transfer setup at probe time to avoid putting too
+ much pressure on the nowait allocator.
+
+ - It should return a unique instance of the
+ ``dma_async_tx_descriptor`` structure, that further represents this
+ particular transfer.
+
+ - This structure can be initialized using the function
+ ``dma_async_tx_descriptor_init``.
+
+ - You'll also need to set two fields in this structure:
+
+ - flags:
+ TODO: Can it be modified by the driver itself, or
+ should it always be the flags passed in the arguments?
+
+ - tx_submit: A pointer to a function you have to implement,
+ that is supposed to push the current transaction descriptor to a
+ pending queue, waiting for issue_pending to be called.
+
+ - In this structure the function pointer callback_result can be
+ initialized in order for the submitter to be notified that a
+ transaction has completed. In earlier code the function pointer
+ callback was used instead; however, it does not provide any status
+ for the transaction and will be deprecated. The result structure,
+ defined as ``dmaengine_result`` and passed in to callback_result,
+ has two fields:
+
+ - result: This provides the transfer result defined by
+ ``dmaengine_tx_result``. Either success or some error condition.
+
+ - residue: Provides the residue bytes of the transfer for those that
+ support residue.
+
+- ``device_issue_pending``
+
+ - Takes the first transaction descriptor in the pending queue,
+ and starts the transfer. Whenever that transfer is done, it
+ should move to the next transaction in the list.
+
+ - This function can be called in an interrupt context
+
+- ``device_tx_status``
+
+ - Should report the bytes left to go over on the given channel
+
+ - Should only care about the transaction descriptor passed as
+ argument, not the currently active one on a given channel
+
+ - The tx_state argument might be NULL
+
+ - Should use dma_set_residue to report it
+
+ - In the case of a cyclic transfer, it should only take into
+ account the current period.
+
+ - This function can be called in an interrupt context.
+
+- ``device_config``
+
+ - Reconfigures the channel with the configuration given as argument
+
+ - This command should NOT be applied synchronously, nor on any
+ currently queued transfers, but only on subsequent ones
+
+ - In this case, the function will receive a ``dma_slave_config``
+ structure pointer as an argument, that will detail which
+ configuration to use.
+
+ - Even though that structure contains a direction field, this
+ field is deprecated in favor of the direction argument given to
+ the prep_* functions
+
+ - This call is mandatory for slave operations only. This should NOT be
+ set or expected to be set for memcpy operations.
+ If a driver supports both, it should use this call for slave
+ operations only and not for memcpy ones.
+
+- ``device_pause``
+
+ - Pauses a transfer on the channel
+
+ - This command should operate synchronously on the channel,
+ pausing right away the work of the given channel
+
+- ``device_resume``
+
+ - Resumes a transfer on the channel
+
+ - This command should operate synchronously on the channel,
+ resuming right away the work of the given channel
+
+- ``device_terminate_all``
+
+ - Aborts all the pending and ongoing transfers on the channel
+
+ - For aborted transfers the complete callback should not be called
+
+ - Can be called from atomic context or from within a complete
+ callback of a descriptor. Must not sleep. Drivers must be able
+ to handle this correctly.
+
+ - Termination may be asynchronous. The driver does not have to
+ wait until the currently active transfer has completely stopped.
+ See device_synchronize.
+
+- ``device_synchronize``
+
+ - Must synchronize the termination of a channel to the current
+ context.
+
+ - Must make sure that memory for previously submitted
+ descriptors is no longer accessed by the DMA controller.
+
+ - Must make sure that all complete callbacks for previously
+ submitted descriptors have finished running and none are
+ scheduled to run.
+
+ - May sleep.
+
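+As a rough, non-authoritative sketch of how a preparation callback and
+its descriptor fit together (all ``foo_`` names are made up and the
+hardware-descriptor handling is elided)::
+
+  struct foo_desc {
+          struct dma_async_tx_descriptor tx;
+          /* driver-private hardware descriptor chain would live here */
+  };
+
+  static struct dma_async_tx_descriptor *
+  foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                    unsigned int sg_len, enum dma_transfer_direction dir,
+                    unsigned long flags, void *context)
+  {
+          /* GFP_NOWAIT: this may be called from an interrupt context. */
+          struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
+
+          if (!d)
+                  return NULL;
+
+          /* Build the hardware descriptors from sgl here (elided). */
+
+          dma_async_tx_descriptor_init(&d->tx, chan);
+          d->tx.flags = flags;
+          /* tx_submit only queues the descriptor; the transfer starts
+           * once the client calls dma_async_issue_pending(). */
+          d->tx.tx_submit = foo_tx_submit;
+
+          return &d->tx;
+  }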
+
+Misc notes
+==========
+
+(stuff that should be documented, but we don't really know
+where to put it)
+
+``dma_run_dependencies``
+
+- Should be called at the end of an async TX transfer, and can be
+ ignored in the slave transfers case.
+
+- Makes sure that dependent operations are run before marking it
+ as complete.
+
+dma_cookie_t
+
+- It's a DMA transaction ID that will increment over time.
+
+- Not really relevant any more since the introduction of ``virt-dma``
+ that abstracts it away.
+
+DMA_CTRL_ACK
+
+- If clear, the descriptor cannot be reused by provider until the
+ client acknowledges receipt, i.e. has a chance to establish any
+ dependency chains
+
+- This can be acked by invoking async_tx_ack()
+
+- If set, it does not mean the descriptor can be reused
+
+DMA_CTRL_REUSE
+
+- If set, the descriptor can be reused after being completed. It should
+ not be freed by provider if this flag is set.
+
+- The descriptor should be prepared for reuse by invoking
+ ``dmaengine_desc_set_reuse()`` which will set DMA_CTRL_REUSE.
+
+- ``dmaengine_desc_set_reuse()`` will succeed only when the channel supports
+ reusable descriptors, as exhibited by its capabilities
+
+- As a consequence, if a device driver wants to skip the
+ ``dma_map_sg()`` and ``dma_unmap_sg()`` in between 2 transfers,
+ because the DMA'd data wasn't used, it can resubmit the transfer right after
+ its completion (a client-side sketch follows these notes).
+
+- Descriptor can be freed in a few ways
+
+ - Clearing DMA_CTRL_REUSE by invoking
+ ``dmaengine_desc_clear_reuse()`` and submitting it for the last txn
+
+ - Explicitly invoking ``dmaengine_desc_free()``, this can succeed only
+ when DMA_CTRL_REUSE is already set
+
+ - Terminating the channel
+
+DMA_PREP_CMD
+
+- If set, the client driver tells the DMA controller that the data passed
+ via the DMA API is command data.
+
+- Interpretation of command data is DMA controller specific. It can be
+ used for issuing commands to other peripherals/register reads/register
+ writes for which the descriptor should be in a different format from
+ normal data descriptors.
+
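+A client-side sketch of the descriptor reuse flow mentioned above,
+assuming ``desc`` was returned by a previous dmaengine_prep_* call on
+``chan`` (error handling mostly omitted)::
+
+  int ret;
+  dma_cookie_t cookie;
+
+  ret = dmaengine_desc_set_reuse(desc);
+  if (ret)
+          return ret;     /* channel has no reusable-descriptor support */
+
+  cookie = dmaengine_submit(desc);
+  dma_async_issue_pending(chan);
+  /* ... wait for completion, e.g. in the descriptor callback ... */
+
+  /* Resubmit the very same descriptor, without remapping the buffers. */
+  cookie = dmaengine_submit(desc);
+  dma_async_issue_pending(chan);
+
+  /* Once it is no longer needed, release it explicitly. */
+  dmaengine_desc_free(desc);
+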
+General Design Notes
+====================
+
+Most of the DMAEngine drivers you'll see are based on a similar design
+that handles the end-of-transfer interrupts in the handler, but defers
+most work to a tasklet, including the start of a new transfer whenever
+the previous transfer ended.
+
+This is a rather inefficient design though, because the inter-transfer
+latency will not only be the interrupt latency, but also the scheduling
+latency of the tasklet, which will leave the channel idle in between and
+slow down the global transfer rate.
+
+You should avoid this kind of practice, and instead of starting a new
+transfer in your tasklet, move that part to the interrupt handler in
+order to have a shorter idle window (that we can't really avoid
+anyway).
+
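+A hedged sketch of that recommendation, with a hypothetical ``foo_chan``
+structure and ``foo_`` helpers standing in for driver-specific
+descriptor management::
+
+  static irqreturn_t foo_dma_irq(int irq, void *data)
+  {
+          struct foo_chan *c = data;
+          unsigned long flags;
+
+          spin_lock_irqsave(&c->lock, flags);
+
+          /* Mark the just-finished descriptor as completed... */
+          foo_complete_current_desc(c);
+
+          /* ...and start the next pending descriptor right here, not in
+           * the tasklet, to keep the inter-transfer idle window short. */
+          if (!list_empty(&c->pending))
+                  foo_start_next_desc(c);
+
+          spin_unlock_irqrestore(&c->lock, flags);
+
+          /* Defer only the (potentially slow) client callbacks. */
+          tasklet_schedule(&c->task);
+
+          return IRQ_HANDLED;
+  }
+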
+Glossary
+========
+
+- Burst: A number of consecutive read or write operations that
+ can be queued to buffers before being flushed to memory.
+
+- Chunk: A contiguous collection of bursts
+
+- Transfer: A collection of chunks (be it contiguous or not)
diff --git a/Documentation/driver-api/dmaengine/pxa_dma.rst b/Documentation/driver-api/dmaengine/pxa_dma.rst
new file mode 100644
index 000000000000..442ee691a190
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/pxa_dma.rst
@@ -0,0 +1,190 @@
+==============================
+PXA/MMP - DMA Slave controller
+==============================
+
+Constraints
+===========
+
+a) Transfers hot queuing
+A driver submitting a transfer and issuing it should be granted that the
+transfer is queued even on a running DMA channel.
+This implies that the queuing doesn't wait for the previous transfer end,
+and that the descriptor chaining is not only done in the irq/tasklet code
+triggered by the end of the transfer.
+A transfer which is submitted and issued on a phy doesn't wait for a phy to
+stop and restart, but is submitted on a "running channel". The other
+drivers, especially mmp_pdma, waited for the phy to stop before relaunching
+a new transfer.
+
+b) All transfers having asked for confirmation should be signaled
+Any issued transfer with DMA_PREP_INTERRUPT should trigger a callback call.
+This implies that even if an irq/tasklet is triggered by end of tx1, but
+at the time of irq/dma tx2 is already finished, tx1->complete() and
+tx2->complete() should be called.
+
+c) Channel running state
+A driver should be able to query if a channel is running or not. For the
+multimedia case, such as video capture, if a transfer is submitted and then
+a check of the DMA channel reports a "stopped channel", the transfer should
+not be issued until the next "start of frame interrupt", hence the need to
+know if a channel is in running or stopped state.
+
+d) Bandwidth guarantee
+The PXA architecture has 4 levels of DMA priorities: high, normal, low.
+The high priorities get twice as much bandwidth as the normal, which get twice
+as much as the low priorities.
+A driver should be able to request a priority, especially the real-time
+ones such as pxa_camera with (big) throughputs.
+
+Design
+======
+a) Virtual channels
+Same concept as in the sa11x0 driver, i.e. a driver is assigned a "virtual
+channel" linked to the requestor line, and the physical DMA channel is
+assigned on the fly when the transfer is issued.
+
+b) Transfer anatomy for a scatter-gather transfer
+
+::
+
+ +------------+-----+---------------+----------------+-----------------+
+ | desc-sg[0] | ... | desc-sg[last] | status updater | finisher/linker |
+ +------------+-----+---------------+----------------+-----------------+
+
+This structure is pointed to by dma->sg_cpu.
+The descriptors are used as follows:
+
+ - desc-sg[i]: i-th descriptor, transferring the i-th sg
+ element to the video buffer scatter gather
+
+ - status updater
+ Transfers a single u32 to a well known dma coherent memory to leave
+ a trace that this transfer is done. The "well known" is unique per
+ physical channel, meaning that a read of this value will tell which
+ is the last finished transfer at that point in time.
+
+ - finisher: has ddadr=DADDR_STOP, dcmd=ENDIRQEN
+
+ - linker: has ddadr= desc-sg[0] of next transfer, dcmd=0
+
+c) Transfers hot-chaining
+Suppose the running chain is:
+
+::
+
+ Buffer 1 Buffer 2
+ +---------+----+---+ +----+----+----+---+
+ | d0 | .. | dN | l | | d0 | .. | dN | f |
+ +---------+----+-|-+ ^----+----+----+---+
+ | |
+ +----+
+
+After a call to dmaengine_submit(b3), the chain will look like:
+
+::
+
+ Buffer 1 Buffer 2 Buffer 3
+ +---------+----+---+ +----+----+----+---+ +----+----+----+---+
+ | d0 | .. | dN | l | | d0 | .. | dN | l | | d0 | .. | dN | f |
+ +---------+----+-|-+ ^----+----+----+-|-+ ^----+----+----+---+
+ | | | |
+ +----+ +----+
+ new_link
+
+If while new_link was created the DMA channel stopped, it is _not_
+restarted. Hot-chaining doesn't break the assumption that
+dma_async_issue_pending() is to be used to ensure the transfer is actually started.
+
+One exception to this rule:
+
+- if Buffer1 and Buffer2 had all their addresses 8 bytes aligned
+
+- and if Buffer3 has at least one address not 4 bytes aligned
+
+- then hot-chaining cannot happen, as the channel must be stopped, the
+ "align bit" must be set, and the channel restarted. As a consequence,
+ in this specific case such a transfer's tx_submit() will be queued on
+ the submitted queue if the DMA is already running in aligned mode.
+
+d) Transfers completion updater
+Each time a transfer is completed on a channel, an interrupt might be
+generated or not, up to the client's request. But in each case, the last
+descriptor of a transfer, the "status updater", will write the latest
+transfer being completed into the physical channel's completion mark.
+
+This will speed up residue calculation, for large transfers such as video
+buffers which hold around 6k descriptors or more. This also allows
+finding out, without taking any lock, which is the latest completed
+transfer in a running DMA chain.
+
+e) Transfers completion, irq and tasklet
+When a transfer flagged as "DMA_PREP_INTERRUPT" is finished, the dma irq
+is raised. Upon this interrupt, a tasklet is scheduled for the physical
+channel.
+
+The tasklet is responsible for:
+
+- reading the physical channel last updater mark
+
+- calling all the transfer callbacks of finished transfers, based on
+ that mark and on each transfer's flags.
+
+If a transfer is completed while this handling is done, a dma irq will
+be raised, and the tasklet will be scheduled once again, having a new
+updater mark.
+
+f) Residue
+Residue granularity will be descriptor based. The issued but not completed
+transfers will be scanned for all of their descriptors against the
+currently running descriptor.
+
+g) Most complicated case of driver's tx queues
+The most tricky situation is when:
+
+ - there are transfers that are not "acked" yet (tx0)
+
+ - a driver submitted an aligned tx1, not chained
+
+ - a driver submitted an aligned tx2 => tx2 is cold chained to tx1
+
+ - a driver issued tx1+tx2 => channel is running in aligned mode
+
+ - a driver submitted an aligned tx3 => tx3 is hot-chained
+
+ - a driver submitted an unaligned tx4 => tx4 is put in submitted queue,
+ not chained
+
+ - a driver issued tx4 => tx4 is put in issued queue, not chained
+
+ - a driver submitted an aligned tx5 => tx5 is put in submitted queue, not
+ chained
+
+ - a driver submitted an aligned tx6 => tx6 is put in submitted queue,
+ cold chained to tx5
+
+ This translates into (after tx4 is issued) :
+
+ - issued queue
+
+ ::
+
+ +-----+ +-----+ +-----+ +-----+
+ | tx1 | | tx2 | | tx3 | | tx4 |
+ +---|-+ ^---|-+ ^-----+ +-----+
+ | | | |
+ +---+ +---+
+ - submitted queue
+
+ ::
+
+ +-----+ +-----+
+ | tx5 | | tx6 |
+ +---|-+ ^-----+
+ | |
+ +---+
+
+- completed queue : empty
+
+- allocated queue : tx0
+
+It should be noted that after tx3 is completed, the channel is stopped, and
+restarted in "unaligned mode" to handle tx4.
+
+Author: Robert Jarzmik <robert.jarzmik@free.fr>
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 9c20624842b7..d17a9876b473 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -46,6 +46,7 @@ available subsections can be seen below.
pinctl
gpio
misc_devices
+ dmaengine/index
.. only:: subproject and html
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index dba0f876b36f..078e981e2b16 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -690,9 +690,7 @@ The USB devices are now exported via debugfs:
This file is handy for status viewing tools in user mode, which can scan
the text format and ignore most of it. More detailed device status
(including class and vendor status) is available from device-specific
-files. For information about the current format of this file, see the
-``Documentation/usb/proc_usb_info.txt`` file in your Linux kernel
-sources.
+files. For information about the current format of this file, see below.
This file, in combination with the poll() system call, can also be used
to detect when devices are added or removed::
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index a38d3aa4d189..79c22d096bbc 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -77,8 +77,8 @@ C. Boot options
1. fbcon=font:<name>
Select the initial font to use. The value 'name' can be any of the
- compiled-in fonts: VGA8x16, 7x14, 10x18, VGA8x8, MINI4x6, RomanLarge,
- SUN8x16, SUN12x22, ProFont6x11, Acorn8x8, PEARL8x8.
+ compiled-in fonts: 10x18, 6x10, 7x14, Acorn8x8, MINI4x6,
+ PEARL8x8, ProFont6x11, SUN12x22, SUN8x16, VGA8x16, VGA8x8.
Note, not all drivers can handle font with widths not divisible by 8,
such as vga16fb.
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 76bbd7fe27b3..f377290fe48e 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -34,6 +34,6 @@
| tile: | TODO |
| um: | TODO |
| unicore32: | TODO |
- | x86: | ok |
+ | x86: | ok | 64-bit only
| xtensa: | TODO |
-----------------------
diff --git a/Documentation/filesystems/dnotify.txt b/Documentation/filesystems/dnotify.txt
index 6baf88f46859..15156883d321 100644
--- a/Documentation/filesystems/dnotify.txt
+++ b/Documentation/filesystems/dnotify.txt
@@ -62,7 +62,7 @@ disabled, fcntl(fd, F_NOTIFY, ...) will return -EINVAL.
Example
-------
-See Documentation/filesystems/dnotify_test.c for an example.
+See tools/testing/selftests/filesystems/dnotify_test.c for an example.
NOTE
----
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 5a8f7f4d2bca..75236c0c2ac2 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -94,10 +94,10 @@ Note: More extensive information for getting started with ext4 can be
* ability to pack bitmaps and inode tables into larger virtual groups via the
flex_bg feature
* large file support
-* Inode allocation using large virtual block groups via flex_bg
+* inode allocation using large virtual block groups via flex_bg
* delayed allocation
* large block (up to pagesize) support
-* efficient new ordered mode in JBD2 and ext4(avoid using buffer head to force
+* efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
the ordering)
[1] Filesystems with a block size of 1k may see a limit imposed by the
@@ -105,7 +105,7 @@ directory hash tree having a maximum depth of two.
2.2 Candidate features for future inclusion
-* Online defrag (patches available but not well tested)
+* online defrag (patches available but not well tested)
* reduced mke2fs time via lazy itable initialization in conjunction with
the uninit_bg feature (capability to do this is available in e2fsprogs
but a kernel thread to do lazy zeroing of unused inode table blocks
@@ -602,7 +602,7 @@ Table of Ext4 specific ioctls
bitmaps and inode table, the userspace tool thus
just passes the new number of blocks.
-EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
+ EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
(like i_blocks, i_size, i_flags, ...) from
the specified inode with inode
EXT4_BOOT_LOADER_INO (#5). This is typically
diff --git a/Documentation/hid/hiddev.txt b/Documentation/hid/hiddev.txt
index 6e8c9f1d2f22..638448707aa2 100644
--- a/Documentation/hid/hiddev.txt
+++ b/Documentation/hid/hiddev.txt
@@ -12,7 +12,7 @@ To support these disparate requirements, the Linux USB system provides
HID events to two separate interfaces:
* the input subsystem, which converts HID events into normal input
device interfaces (such as keyboard, mouse and joystick) and a
-normalised event interface - see Documentation/input/input.txt
+normalised event interface - see Documentation/input/input.rst
* the hiddev interface, which provides fairly raw HID events
The data flow for a HID event produced by a device is something like
diff --git a/Documentation/hwmon/max31785 b/Documentation/hwmon/max31785
new file mode 100644
index 000000000000..45fb6093dec2
--- /dev/null
+++ b/Documentation/hwmon/max31785
@@ -0,0 +1,51 @@
+Kernel driver max31785
+======================
+
+Supported chips:
+ * Maxim MAX31785, MAX31785A
+ Prefix: 'max31785' or 'max31785a'
+ Addresses scanned: -
+ Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf
+
+Author: Andrew Jeffery <andrew@aj.id.au>
+
+Description
+-----------
+
+The Maxim MAX31785 is a PMBus device providing closed-loop, multi-channel fan
+management with temperature and remote voltage sensing. Various fan control
+features are provided, including PWM frequency control, temperature hysteresis,
+dual tachometer measurements, and fan health monitoring.
+
+For dual rotor fan configuration, the MAX31785 exposes the slowest rotor of the
+two in the fan[1-4]_input attributes.
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+Sysfs attributes
+----------------
+
+fan[1-4]_alarm Fan alarm.
+fan[1-4]_fault Fan fault.
+fan[1-4]_input Fan RPM.
+
+in[1-6]_crit Critical maximum output voltage
+in[1-6]_crit_alarm Output voltage critical high alarm
+in[1-6]_input Measured output voltage
+in[1-6]_label "vout[18-23]"
+in[1-6]_lcrit Critical minimum output voltage
+in[1-6]_lcrit_alarm Output voltage critical low alarm
+in[1-6]_max Maximum output voltage
+in[1-6]_max_alarm Output voltage high alarm
+in[1-6]_min Minimum output voltage
+in[1-6]_min_alarm Output voltage low alarm
+
+temp[1-11]_crit Critical high temperature
+temp[1-11]_crit_alarm Chip temperature critical high alarm
+temp[1-11]_input Measured temperature
+temp[1-11]_max Maximum temperature
+temp[1-11]_max_alarm Chip temperature high alarm
diff --git a/Documentation/hwmon/sht15 b/Documentation/hwmon/sht15
index 778987d1856f..5e3207c3b177 100644
--- a/Documentation/hwmon/sht15
+++ b/Documentation/hwmon/sht15
@@ -42,8 +42,7 @@ chip. These coefficients are used to internally calibrate the signals from the
sensors. Disabling the reload of those coefficients allows saving 10ms for each
measurement and decrease power consumption, while losing on precision.
-Some options may be set directly in the sht15_platform_data structure
-or via sysfs attributes.
+Some options may be set via sysfs attributes.
Notes:
* The regulator supply name is set to "vcc".
diff --git a/Documentation/input/devices/xpad.rst b/Documentation/input/devices/xpad.rst
index 5a709ab77c8d..b8bd65962dd8 100644
--- a/Documentation/input/devices/xpad.rst
+++ b/Documentation/input/devices/xpad.rst
@@ -230,4 +230,5 @@ Historic Edits
2005-03-19 - Dominic Cerquetti <binary1230@yahoo.com>
- added stuff for dance pads, new d-pad->axes mappings
-Later changes may be viewed with 'git log Documentation/input/xpad.txt'
+Later changes may be viewed with
+'git log --follow Documentation/input/devices/xpad.rst'
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 19276f5d195c..1c707fc9b141 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -184,7 +184,7 @@ is done when dirty_ratio is reached.
DO_CPU:
Enable CPU frequency scaling when in laptop mode. (Requires CPUFreq to be setup.
-See Documentation/cpu-freq/user-guide.txt for more info. Disabled by default.)
+See Documentation/admin-guide/pm/cpufreq.rst for more info. Disabled by default.)
CPU_MAXFREQ:
@@ -287,7 +287,7 @@ MINIMUM_BATTERY_MINUTES=10
# Should the maximum CPU frequency be adjusted down while on battery?
# Requires CPUFreq to be setup.
-# See Documentation/cpu-freq/user-guide.txt for more info
+# See Documentation/admin-guide/pm/cpufreq.rst for more info
#DO_CPU=0
# When on battery what is the maximum CPU speed that the system should
@@ -378,7 +378,7 @@ BATT_HD=${BATT_HD:-'4'}
DIRTY_RATIO=${DIRTY_RATIO:-'40'}
# cpu frequency scaling
-# See Documentation/cpu-freq/user-guide.txt for more info
+# See Documentation/admin-guide/pm/cpufreq.rst for more info
DO_CPU=${CPU_MANAGE:-'0'}
CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
diff --git a/Documentation/locking/rt-mutex-design.txt b/Documentation/locking/rt-mutex-design.txt
index 6c6e8c2410de..3d7b865539cc 100644
--- a/Documentation/locking/rt-mutex-design.txt
+++ b/Documentation/locking/rt-mutex-design.txt
@@ -8,7 +8,7 @@ RT-mutex implementation design
This document tries to describe the design of the rtmutex.c implementation.
It doesn't describe the reasons why rtmutex.c exists. For that please see
-Documentation/rt-mutex.txt. Although this document does explain problems
+Documentation/locking/rt-mutex.txt. Although this document does explain problems
that happen without this code, but that is in the concept to understand
what the code actually is doing.
diff --git a/Documentation/media/dvb-drivers/bt8xx.rst b/Documentation/media/dvb-drivers/bt8xx.rst
index b43958b7340c..e3e387bdf498 100644
--- a/Documentation/media/dvb-drivers/bt8xx.rst
+++ b/Documentation/media/dvb-drivers/bt8xx.rst
@@ -18,7 +18,7 @@ General information
This class of cards has a bt878a as the PCI interface, and require the bttv driver
for accessing the i2c bus and the gpio pins of the bt8xx chipset.
-Please see Documentation/dvb/cards.txt => o Cards based on the Conexant Bt8xx PCI bridge:
+Please see Documentation/media/dvb-drivers/cards.rst => o Cards based on the Conexant Bt8xx PCI bridge:
Compiling kernel please enable:
@@ -45,7 +45,7 @@ Loading Modules
Regular case: If the bttv driver detects a bt8xx-based DVB card, all frontend and backend modules will be loaded automatically.
Exceptions are:
- Old TwinHan DST cards or clones with or without CA slot and not containing an Eeprom.
-People running udev please see Documentation/dvb/udev.txt.
+People running udev please see Documentation/media/dvb-drivers/udev.rst.
In the following cases overriding the PCI type detection for dvb-bt8xx might be necessary:
@@ -72,7 +72,7 @@ Useful parameters for verbosity level and debugging the dst module:
The autodetected values are determined by the cards' "response string".
In your logs see f. ex.: dst_get_device_id: Recognize [DSTMCI].
For bug reports please send in a complete log with verbose=4 activated.
-Please also see Documentation/dvb/ci.txt.
+Please also see Documentation/media/dvb-drivers/ci.rst.
Running multiple cards
~~~~~~~~~~~~~~~~~~~~~~
@@ -100,7 +100,7 @@ Examples of card ID's:
$ modprobe bttv card=113 card=135
-For a full list of card ID's please see Documentation/video4linux/CARDLIST.bttv.
+For a full list of card ID's please see Documentation/media/v4l-drivers/bttv-cardlist.rst.
In case of further problems please subscribe and send questions to the mailing list: linux-dvb@linuxtv.org.
Probing the cards with broken PCI subsystem ID
diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
index 9d6c860271cb..d311a6866b3b 100644
--- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
@@ -431,7 +431,7 @@ MPEG stream.
*Historical context*: This format specification originates from a
custom, embedded, sliced VBI data format used by the ``ivtv`` driver.
This format has already been informally specified in the kernel sources
-in the file ``Documentation/video4linux/cx2341x/README.vbi`` . The
+in the file ``Documentation/media/v4l-drivers/cx2341x.rst`` . The
maximum size of the payload and other aspects of this format are driven
by the CX23415 MPEG decoder's capabilities and limitations with respect
to extracting, decoding, and displaying sliced VBI data embedded within
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index a3e81c1d276b..dfe49ae57e78 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -284,7 +284,7 @@ enum v4l2_mpeg_stream_vbi_fmt -
* - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV``
- VBI in private packets, IVTV format (documented in the kernel
sources in the file
- ``Documentation/video4linux/cx2341x/README.vbi``)
+ ``Documentation/media/v4l-drivers/cx2341x.rst``)
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index 521adb795535..38af1472a4b4 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -52,7 +52,7 @@ please make a proposal on the linux-media mailing list.
`http://www.ivtvdriver.org/ <http://www.ivtvdriver.org/>`__
The format is documented in the kernel sources in the file
- ``Documentation/video4linux/cx2341x/README.hm12``
+ ``Documentation/media/v4l-drivers/cx2341x.rst``
* .. _V4L2-PIX-FMT-CPIA1:
- ``V4L2_PIX_FMT_CPIA1``
diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst
index 195ccaac2816..5f35e2fb5afa 100644
--- a/Documentation/media/v4l-drivers/bttv.rst
+++ b/Documentation/media/v4l-drivers/bttv.rst
@@ -307,7 +307,7 @@ console and let some terminal application log the messages. /me uses
screen. See Documentation/admin-guide/serial-console.rst for details on setting
up a serial console.
-Read Documentation/admin-guide/oops-tracing.rst to learn how to get any useful
+Read Documentation/admin-guide/bug-hunting.rst to learn how to get any useful
information out of a register+stack dump printed by the kernel on
protection faults (so-called "kernel oops").
diff --git a/Documentation/media/v4l-drivers/max2175.rst b/Documentation/media/v4l-drivers/max2175.rst
index 04478c25d57a..b1a4c89fd869 100644
--- a/Documentation/media/v4l-drivers/max2175.rst
+++ b/Documentation/media/v4l-drivers/max2175.rst
@@ -7,7 +7,7 @@ The MAX2175 driver implements the following driver-specific controls:
-------------------------------
Enable/Disable I2S output of the tuner. This is a private control
that can be accessed only using the subdev interface.
- Refer to Documentation/media/kapi/v4l2-controls for more details.
+ Refer to Documentation/media/kapi/v4l2-controls.rst for more details.
.. flat-table::
:header-rows: 0
diff --git a/Documentation/networking/cdc_mbim.txt b/Documentation/networking/cdc_mbim.txt
index e4c376abbdad..4e68f0bc5dba 100644
--- a/Documentation/networking/cdc_mbim.txt
+++ b/Documentation/networking/cdc_mbim.txt
@@ -332,8 +332,8 @@ References
[5] "MBIM (Mobile Broadband Interface Model) Registry"
- http://compliance.usb.org/mbim/
-[6] "/dev/bus/usb filesystem output"
- - Documentation/usb/proc_usb_info.txt
+[6] "/sys/kernel/debug/usb/devices output format"
+ - Documentation/driver-api/usb/usb.rst
[7] "/sys/bus/usb/devices/.../descriptors"
- Documentation/ABI/stable/sysfs-bus-usb
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
index d52d191bbb0c..27bc09cfcf6d 100644
--- a/Documentation/networking/checksum-offloads.txt
+++ b/Documentation/networking/checksum-offloads.txt
@@ -47,7 +47,7 @@ The requirements for GSO are more complicated, because when segmenting an
(section 'E') for more details.
A driver declares its offload capabilities in netdev->hw_features; see
- Documentation/networking/netdev-features for more. Note that a device
+ Documentation/networking/netdev-features.txt for more. Note that a device
which only advertises NETIF_F_IP[V6]_CSUM must still obey the csum_start
and csum_offset given in the SKB; if it tries to deduce these itself in
hardware (as some NICs do) the driver should check that the values in the
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index f3b9e507ab05..bf654845556e 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -1055,7 +1055,7 @@ TX_RING part only TP_STATUS_AVAILABLE is set, then the tp_sec and tp_{n,u}sec
members do not contain a valid value. For TX_RINGs, by default no timestamp
is generated!
-See include/linux/net_tstamp.h and Documentation/networking/timestamping
+See include/linux/net_tstamp.h and Documentation/networking/timestamping.txt
for more information on hardware timestamps.
-------------------------------------------------------------------------------
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
index aafddbee7377..b154f6c0c36e 100644
--- a/Documentation/pi-futex.txt
+++ b/Documentation/pi-futex.txt
@@ -119,4 +119,4 @@ properties of futexes, and all four combinations are possible: futex,
robust-futex, PI-futex, robust+PI-futex.
More details about priority inheritance can be found in
-Documentation/rt-mutex.txt.
+Documentation/locking/rt-mutex.txt.
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index 974916ff6608..27df7f98668a 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -24,7 +24,8 @@ platform.
If one of the strings listed in /sys/power/state is written to it, the system
will attempt to transition into the corresponding sleep state. Refer to
-Documentation/power/states.txt for a description of each of those states.
+Documentation/admin-guide/pm/sleep-states.rst for a description of each of
+those states.
/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
Specifically, it tells the kernel what to do after creating a hibernation image.
@@ -42,7 +43,7 @@ The currently selected option is printed in square brackets.
The 'platform' option is only available if the platform provides a special
mechanism to put the system to sleep after creating a hibernation image (ACPI
does that, for example). The 'suspend' option is available if Suspend-to-RAM
-is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
+is supported. Refer to Documentation/power/basic-pm-debugging.txt for the
description of the 'test_resume' option.
To select an option, write the string representing it to /sys/power/disk.
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index a1b7f7158930..d17fdf8f45ef 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -8,7 +8,7 @@ management. Based on previous work by Patrick Mochel <mochel@transmeta.com>
This document only covers the aspects of power management specific to PCI
devices. For general description of the kernel's interfaces related to device
-power management refer to Documentation/power/admin-guide/devices.rst and
+power management refer to Documentation/driver-api/pm/devices.rst and
Documentation/power/runtime_pm.txt.
---------------------------------------------------------------------------
@@ -417,7 +417,7 @@ pm->runtime_idle() callback.
2.4. System-Wide Power Transitions
----------------------------------
There are a few different types of system-wide power transitions, described in
-Documentation/power/admin-guide/devices.rst. Each of them requires devices to be handled
+Documentation/driver-api/pm/devices.rst. Each of them requires devices to be handled
in a specific way and the PM core executes subsystem-level power management
callbacks for this purpose. They are executed in phases such that each phase
involves executing the same subsystem-level callback for every device belonging
@@ -623,7 +623,7 @@ System restore requires a hibernation image to be loaded into memory and the
pre-hibernation memory contents to be restored before the pre-hibernation system
activity can be resumed.
-As described in Documentation/power/admin-guide/devices.rst, the hibernation image is loaded
+As described in Documentation/driver-api/pm/devices.rst, the hibernation image is loaded
into memory by a fresh instance of the kernel, called the boot kernel, which in
turn is loaded and run by a boot loader in the usual way. After the boot kernel
has loaded the image, it needs to replace its own code and data with the code
@@ -677,7 +677,7 @@ controlling the runtime power management of their devices.
At the time of this writing there are two ways to define power management
callbacks for a PCI device driver, the recommended one, based on using a
-dev_pm_ops structure described in Documentation/power/admin-guide/devices.rst, and the
+dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and the
"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
.resume() callbacks from struct pci_driver are used. The legacy approach,
however, doesn't allow one to define runtime power management callbacks and is
@@ -1046,5 +1046,5 @@ PCI Local Bus Specification, Rev. 3.0
PCI Bus Power Management Interface Specification, Rev. 1.2
Advanced Configuration and Power Interface (ACPI) Specification, Rev. 3.0b
PCI Express Base Specification, Rev. 2.0
-Documentation/power/admin-guide/devices.rst
+Documentation/driver-api/pm/devices.rst
Documentation/power/runtime_pm.txt
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 625549d4c74a..57af2f7963ee 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -680,7 +680,7 @@ left in runtime suspend. If that happens, the PM core will not execute any
system suspend and resume callbacks for all of those devices, except for the
complete callback, which is then entirely responsible for handling the device
as appropriate. This only applies to system suspend transitions that are not
-related to hibernation (see Documentation/power/admin-guide/devices.rst for more
+related to hibernation (see Documentation/driver-api/pm/devices.rst for more
information).
The PM core does its best to reduce the probability of race conditions between
diff --git a/Documentation/process/3.Early-stage.rst b/Documentation/process/3.Early-stage.rst
index af2c0af931d6..be00716071d4 100644
--- a/Documentation/process/3.Early-stage.rst
+++ b/Documentation/process/3.Early-stage.rst
@@ -178,7 +178,7 @@ matter is (1) kernel developers tend to be busy, (2) there is no shortage
of people with grand plans and little code (or even prospect of code) to
back them up, and (3) nobody is obligated to review or comment on ideas
posted by others. Beyond that, high-level designs often hide problems
-which are only reviewed when somebody actually tries to implement those
+which are only revealed when somebody actually tries to implement those
designs; for that reason, kernel developers would rather see the code.
If a request-for-comments posting yields little in the way of comments, do
diff --git a/Documentation/process/4.Coding.rst b/Documentation/process/4.Coding.rst
index 6df19943dd4d..26b106071364 100644
--- a/Documentation/process/4.Coding.rst
+++ b/Documentation/process/4.Coding.rst
@@ -307,7 +307,7 @@ variety of potential coding problems; it can also propose fixes for those
problems. Quite a few "semantic patches" for the kernel have been packaged
under the scripts/coccinelle directory; running "make coccicheck" will run
through those semantic patches and report on any problems found. See
-Documentation/coccinelle.txt for more information.
+Documentation/dev-tools/coccinelle.rst for more information.
Other kinds of portability errors are best found by compiling your code for
other architectures. If you do not happen to have an S/390 system or a
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 61e43cc3ed17..a430f6eee756 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -26,6 +26,7 @@ Below are the essential guides that every developer should read.
coding-style
email-clients
kernel-enforcement-statement
+ kernel-driver-statement
Other guides to the community that are of interest to most developers are:
diff --git a/Documentation/process/kernel-driver-statement.rst b/Documentation/process/kernel-driver-statement.rst
new file mode 100644
index 000000000000..60d9d868f300
--- /dev/null
+++ b/Documentation/process/kernel-driver-statement.rst
@@ -0,0 +1,199 @@
+Kernel Driver Statement
+-----------------------
+
+Position Statement on Linux Kernel Modules
+==========================================
+
+
+We, the undersigned Linux kernel developers, consider any closed-source
+Linux kernel module or driver to be harmful and undesirable. We have
+repeatedly found them to be detrimental to Linux users, businesses, and
+the greater Linux ecosystem. Such modules negate the openness,
+stability, flexibility, and maintainability of the Linux development
+model and shut their users off from the expertise of the Linux
+community. Vendors that provide closed-source kernel modules force their
+customers to give up key Linux advantages or choose new vendors.
+Therefore, in order to take full advantage of the cost savings and
+shared support benefits open source has to offer, we urge vendors to
+adopt a policy of supporting their customers on Linux with open-source
+kernel code.
+
+We speak only for ourselves, and not for any company we might work for
+today, have in the past, or will in the future.
+
+ - Dave Airlie
+ - Nick Andrew
+ - Jens Axboe
+ - Ralf Baechle
+ - Felipe Balbi
+ - Ohad Ben-Cohen
+ - Muli Ben-Yehuda
+ - Jiri Benc
+ - Arnd Bergmann
+ - Thomas Bogendoerfer
+ - Vitaly Bordug
+ - James Bottomley
+ - Josh Boyer
+ - Neil Brown
+ - Mark Brown
+ - David Brownell
+ - Michael Buesch
+ - Franck Bui-Huu
+ - Adrian Bunk
+ - François Cami
+ - Ralph Campbell
+ - Luiz Fernando N. Capitulino
+ - Mauro Carvalho Chehab
+ - Denis Cheng
+ - Jonathan Corbet
+ - Glauber Costa
+ - Alan Cox
+ - Magnus Damm
+ - Ahmed S. Darwish
+ - Robert P. J. Day
+ - Hans de Goede
+ - Arnaldo Carvalho de Melo
+ - Helge Deller
+ - Jean Delvare
+ - Mathieu Desnoyers
+ - Sven-Thorsten Dietrich
+ - Alexey Dobriyan
+ - Daniel Drake
+ - Alex Dubov
+ - Randy Dunlap
+ - Michael Ellerman
+ - Pekka Enberg
+ - Jan Engelhardt
+ - Mark Fasheh
+ - J. Bruce Fields
+ - Larry Finger
+ - Jeremy Fitzhardinge
+ - Mike Frysinger
+ - Kumar Gala
+ - Robin Getz
+ - Liam Girdwood
+ - Jan-Benedict Glaw
+ - Thomas Gleixner
+ - Brice Goglin
+ - Cyrill Gorcunov
+ - Andy Gospodarek
+ - Thomas Graf
+ - Krzysztof Halasa
+ - Harvey Harrison
+ - Stephen Hemminger
+ - Michael Hennerich
+ - Tejun Heo
+ - Benjamin Herrenschmidt
+ - Kristian Høgsberg
+ - Henrique de Moraes Holschuh
+ - Marcel Holtmann
+ - Mike Isely
+ - Takashi Iwai
+ - Olof Johansson
+ - Dave Jones
+ - Jesper Juhl
+ - Matthias Kaehlcke
+ - Kenji Kaneshige
+ - Jan Kara
+ - Jeremy Kerr
+ - Russell King
+ - Olaf Kirch
+ - Roel Kluin
+ - Hans-Jürgen Koch
+ - Auke Kok
+ - Peter Korsgaard
+ - Jiri Kosina
+ - Mariusz Kozlowski
+ - Greg Kroah-Hartman
+ - Michael Krufky
+ - Aneesh Kumar
+ - Clemens Ladisch
+ - Christoph Lameter
+ - Gunnar Larisch
+ - Anders Larsen
+ - Grant Likely
+ - John W. Linville
+ - Yinghai Lu
+ - Tony Luck
+ - Pavel Machek
+ - Matt Mackall
+ - Paul Mackerras
+ - Roland McGrath
+ - Patrick McHardy
+ - Kyle McMartin
+ - Paul Menage
+ - Thierry Merle
+ - Eric Miao
+ - Akinobu Mita
+ - Ingo Molnar
+ - James Morris
+ - Andrew Morton
+ - Paul Mundt
+ - Oleg Nesterov
+ - Luca Olivetti
+ - S.Çağlar Onur
+ - Pierre Ossman
+ - Keith Owens
+ - Venkatesh Pallipadi
+ - Nick Piggin
+ - Nicolas Pitre
+ - Evgeniy Polyakov
+ - Richard Purdie
+ - Mike Rapoport
+ - Sam Ravnborg
+ - Gerrit Renker
+ - Stefan Richter
+ - David Rientjes
+ - Luis R. Rodriguez
+ - Stefan Roese
+ - Francois Romieu
+ - Rami Rosen
+ - Stephen Rothwell
+ - Maciej W. Rozycki
+ - Mark Salyzyn
+ - Yoshinori Sato
+ - Deepak Saxena
+ - Holger Schurig
+ - Amit Shah
+ - Yoshihiro Shimoda
+ - Sergei Shtylyov
+ - Kay Sievers
+ - Sebastian Siewior
+ - Rik Snel
+ - Jes Sorensen
+ - Alexey Starikovskiy
+ - Alan Stern
+ - Timur Tabi
+ - Hirokazu Takata
+ - Eliezer Tamir
+ - Eugene Teo
+ - Doug Thompson
+ - FUJITA Tomonori
+ - Dmitry Torokhov
+ - Marcelo Tosatti
+ - Steven Toth
+ - Theodore Tso
+ - Matthias Urlichs
+ - Geert Uytterhoeven
+ - Arjan van de Ven
+ - Ivo van Doorn
+ - Rik van Riel
+ - Wim Van Sebroeck
+ - Hans Verkuil
+ - Horst H. von Brand
+ - Dmitri Vorobiev
+ - Anton Vorontsov
+ - Daniel Walker
+ - Johannes Weiner
+ - Harald Welte
+ - Matthew Wilcox
+ - Dan J. Williams
+ - Darrick J. Wong
+ - David Woodhouse
+ - Chris Wright
+ - Bryan Wu
+ - Rafael J. Wysocki
+ - Herbert Xu
+ - Vlad Yasevich
+ - Peter Zijlstra
+ - Bartlomiej Zolnierkiewicz
diff --git a/Documentation/process/submitting-drivers.rst b/Documentation/process/submitting-drivers.rst
index afb82ee0cbea..b38bf2054ce3 100644
--- a/Documentation/process/submitting-drivers.rst
+++ b/Documentation/process/submitting-drivers.rst
@@ -117,7 +117,7 @@ PM support:
anything. For the driver testing instructions see
Documentation/power/drivers-testing.txt and for a relatively
complete overview of the power management issues related to
- drivers see Documentation/power/admin-guide/devices.rst .
+ drivers see Documentation/driver-api/pm/devices.rst.
Control:
In general if there is active maintenance of a driver by
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 733478ade91b..1ef19d3a3eee 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -621,14 +621,14 @@ The canonical patch subject line is::
The canonical patch message body contains the following:
- - A ``from`` line specifying the patch author (only needed if the person
- sending the patch is not the author).
-
- - An empty line.
+ - A ``from`` line specifying the patch author, followed by an empty
+ line (only needed if the person sending the patch is not the author).
- The body of the explanation, line wrapped at 75 columns, which will
be copied to the permanent changelog to describe this patch.
+ - An empty line.
+
- The ``Signed-off-by:`` lines, described above, which will
also go in the changelog.
diff --git a/Documentation/security/LSM.rst b/Documentation/security/LSM.rst
index d75778b0fa10..98522e0e1ee2 100644
--- a/Documentation/security/LSM.rst
+++ b/Documentation/security/LSM.rst
@@ -5,7 +5,7 @@ Linux Security Module Development
Based on https://lkml.org/lkml/2007/10/26/215,
a new LSM is accepted into the kernel when its intent (a description of
what it tries to protect against and in what cases one would expect to
-use it) has been appropriately documented in ``Documentation/security/LSM``.
+use it) has been appropriately documented in ``Documentation/security/LSM.rst``.
This allows an LSM's code to be easily compared to its goals, and so
that end users and distros can make a more informed decision about which
LSMs suit their requirements.
diff --git a/Documentation/security/credentials.rst b/Documentation/security/credentials.rst
index 038a7e19eff9..66a2e24939d8 100644
--- a/Documentation/security/credentials.rst
+++ b/Documentation/security/credentials.rst
@@ -196,7 +196,7 @@ The Linux kernel supports the following types of credentials:
When a process accesses a key, if not already present, it will normally be
cached on one of these keyrings for future accesses to find.
- For more information on using keys, see Documentation/security/keys.txt.
+ For more information on using keys, see ``Documentation/security/keys/*``.
5. LSM
diff --git a/Documentation/security/keys/request-key.rst b/Documentation/security/keys/request-key.rst
index b2d16abaa9e9..21e27238cec6 100644
--- a/Documentation/security/keys/request-key.rst
+++ b/Documentation/security/keys/request-key.rst
@@ -3,7 +3,7 @@ Key Request Service
===================
The key request service is part of the key retention service (refer to
-Documentation/security/core.rst). This document explains more fully how
+Documentation/security/keys/core.rst). This document explains more fully how
the requesting algorithm works.
The process starts by either the kernel requesting a service by calling
diff --git a/Documentation/sound/cards/joystick.rst b/Documentation/sound/cards/joystick.rst
index a6e468c81d02..488946fc1079 100644
--- a/Documentation/sound/cards/joystick.rst
+++ b/Documentation/sound/cards/joystick.rst
@@ -11,7 +11,7 @@ General
First of all, you need to enable GAMEPORT support on Linux kernel for
using a joystick with the ALSA driver. For the details of gameport
-support, refer to Documentation/input/joystick.txt.
+support, refer to Documentation/input/joydev/joystick.rst.
The joystick support of ALSA drivers is different between ISA and PCI
cards. In the case of ISA (PnP) cards, it's usually handled by the
diff --git a/Documentation/sound/hd-audio/notes.rst b/Documentation/sound/hd-audio/notes.rst
index f59c3cdbfaf4..9f7347830ba4 100644
--- a/Documentation/sound/hd-audio/notes.rst
+++ b/Documentation/sound/hd-audio/notes.rst
@@ -192,7 +192,7 @@ preset model instead of PCI (and codec-) SSID look-up.
What ``model`` option values are available depends on the codec chip.
Check your codec chip from the codec proc file (see "Codec Proc-File"
section below). It will show the vendor/product name of your codec
-chip. Then, see Documentation/sound/HD-Audio-Models.rst file,
+chip. Then, see Documentation/sound/hd-audio/models.rst file,
the section of HD-audio driver. You can find a list of codecs
and ``model`` options belonging to each codec. For example, for Realtek
ALC262 codec chip, pass ``model=ultra`` for devices that are compatible
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 58ffa3f5bda7..a0b268466cb1 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -2498,7 +2498,7 @@ Mic boost
Mic-boost switch is set as “Mic Boost” or “Mic Boost (6dB)”.
More precise information can be found in
-``Documentation/sound/alsa/ControlNames.txt``.
+``Documentation/sound/designs/control-names.rst``.
Access Flags
------------
diff --git a/Documentation/sysctl/README b/Documentation/sysctl/README
index 91f54ffa0077..d5f24ab0ecc3 100644
--- a/Documentation/sysctl/README
+++ b/Documentation/sysctl/README
@@ -60,7 +60,7 @@ debug/ <empty>
dev/ device specific information (eg dev/cdrom/info)
fs/ specific filesystems
filehandle, inode, dentry and quota tuning
- binfmt_misc <Documentation/binfmt_misc.txt>
+ binfmt_misc <Documentation/admin-guide/binfmt-misc.rst>
kernel/ global kernel info / tuning
miscellaneous stuff
net/ networking stuff, for documentation look in:
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 35e17f748ca7..6c00c1e2743f 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -277,7 +277,7 @@ in a mount namespace.
----------------------------------------------------------
Documentation for the files in /proc/sys/fs/binfmt_misc is
-in Documentation/binfmt_misc.txt.
+in Documentation/admin-guide/binfmt-misc.rst.
3. /proc/sys/fs/mqueue - POSIX message queues filesystem
diff --git a/Documentation/timers/highres.txt b/Documentation/timers/highres.txt
index e8789976e77c..9d88f67781c2 100644
--- a/Documentation/timers/highres.txt
+++ b/Documentation/timers/highres.txt
@@ -4,10 +4,10 @@ High resolution timers and dynamic ticks design notes
Further information can be found in the paper of the OLS 2006 talk "hrtimers
and beyond". The paper is part of the OLS 2006 Proceedings Volume 1, which can
be found on the OLS website:
-http://www.linuxsymposium.org/2006/linuxsymposium_procv1.pdf
+https://www.kernel.org/doc/ols/2006/ols2006v1-pages-333-346.pdf
The slides to this talk are available from:
-http://tglx.de/projects/hrtimers/ols2006-hrtimers.pdf
+http://www.cs.columbia.edu/~nahum/w6998/papers/ols2006-hrtimers-slides.pdf
The slides contain five figures (pages 2, 15, 18, 20, 22), which illustrate the
changes in the time(r) related Linux subsystems. Figure #1 (p. 2) shows the
diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst
new file mode 100644
index 000000000000..8494a801d341
--- /dev/null
+++ b/Documentation/trace/ftrace-uses.rst
@@ -0,0 +1,293 @@
+=================================
+Using ftrace to hook to functions
+=================================
+
+.. Copyright 2017 VMware Inc.
+.. Author: Steven Rostedt <srostedt@goodmis.org>
+.. License: The GNU Free Documentation License, Version 1.2
+.. (dual licensed under the GPL v2)
+
+Written for: 4.14
+
+Introduction
+============
+
+The ftrace infrastructure was originally created to attach callbacks to the
+beginning of functions in order to record and trace the flow of the kernel.
+But callbacks to the start of a function can have other use cases, such as
+live kernel patching or security monitoring. This document describes
+how to use ftrace to implement your own function callbacks.
+
+
+The ftrace context
+==================
+
+WARNING: The ability to add a callback to almost any function within the
+kernel comes with risks. A callback can be called from any context
+(normal, softirq, irq, and NMI). Callbacks can also be called just before
+going to idle, during CPU bring up and takedown, or going to user space.
+This requires extra care about what can be done inside a callback. A callback
+can be called outside the protective scope of RCU.
+
+The ftrace infrastructure has some protections against recursion and RCU
+issues, but one must still be very careful in how the callbacks are used.
+
+
+The ftrace_ops structure
+========================
+
+To register a function callback, an ftrace_ops structure is required. This
+structure is used to tell ftrace which function should be called as the
+callback, as well as what protections the callback performs itself and
+does not require ftrace to handle.
+
+Only one field needs to be set when registering an ftrace_ops with ftrace:
+
+.. code-block:: c
+
+ struct ftrace_ops ops = {
+ .func = my_callback_func,
+ .flags = MY_FTRACE_FLAGS,
+ .private = any_private_data_structure,
+ };
+
+Both .flags and .private are optional. Only .func is required.
+
+To enable tracing call::
+
+  register_ftrace_function(&ops);
+
+To disable tracing call::
+
+  unregister_ftrace_function(&ops);
+
+The above is defined by including the header::
+
+  #include <linux/ftrace.h>
+
+The registered callback will start being called some time after
+register_ftrace_function() is called and before it returns. The exact time
+that callbacks start being called is dependent upon architecture and scheduling
+of services. The callback itself will have to handle any synchronization if it
+must begin at an exact moment.
+
+unregister_ftrace_function() guarantees that the callback is no longer
+being called by any function after unregister_ftrace_function() returns.
+Note that to provide this guarantee, unregister_ftrace_function() may take
+some time to finish.
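+
+As a rough sketch only (the names are made up and it is not taken from any
+in-tree user), a minimal module that counts how often its callback fires
+could tie these pieces together as follows; the callback prototype is
+described in the next section:
+
+.. code-block:: c
+
+  #include <linux/ftrace.h>
+  #include <linux/module.h>
+
+  /* A real user would use a per-CPU or atomic counter; this is a sketch. */
+  static unsigned long my_hits;
+
+  static void my_callback_func(unsigned long ip, unsigned long parent_ip,
+                               struct ftrace_ops *op, struct pt_regs *regs)
+  {
+          my_hits++;      /* keep the work minimal; any context may call us */
+  }
+
+  static struct ftrace_ops ops = {
+          .func = my_callback_func,
+  };
+
+  static int __init my_init(void)
+  {
+          return register_ftrace_function(&ops);
+  }
+
+  static void __exit my_exit(void)
+  {
+          unregister_ftrace_function(&ops);
+  }
+
+  module_init(my_init);
+  module_exit(my_exit);
+  MODULE_LICENSE("GPL");
+
+Without a filter set (see "Filtering which functions to trace" below), the
+callback is attached to every function that ftrace can trace.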
+
+
+The callback function
+=====================
+
+The prototype of the callback function is as follows (as of v4.14):
+
+.. code-block:: c
+
+  void callback_func(unsigned long ip, unsigned long parent_ip,
+                     struct ftrace_ops *op, struct pt_regs *regs);
+
+@ip
+ This is the instruction pointer of the function that is being traced.
+ (where the fentry or mcount is within the function)
+
+@parent_ip
+ This is the instruction pointer of the function that called the
+ function being traced (where the call of the function occurred).
+
+@op
+ This is a pointer to ftrace_ops that was used to register the callback.
+ This can be used to pass data to the callback via the private pointer.
+
+@regs
+ If the FTRACE_OPS_FL_SAVE_REGS or FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
+ flags are set in the ftrace_ops structure, then this will point
+ to the pt_regs structure as it would be if a breakpoint were placed
+ at the start of the function that ftrace is tracing. Otherwise it
+ either contains garbage or is NULL.
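+
+To illustrate how @op and @regs are typically consumed (the private
+structure below is made up purely for this example), a callback might look
+like:
+
+.. code-block:: c
+
+  /* hypothetical state passed in through ops.private */
+  struct my_data {
+          unsigned long count;
+  };
+
+  static void my_callback_func(unsigned long ip, unsigned long parent_ip,
+                               struct ftrace_ops *op, struct pt_regs *regs)
+  {
+          struct my_data *data = op->private;
+
+          data->count++;      /* unsynchronized; fine for a sketch */
+
+          /*
+           * regs is only valid when one of the SAVE_REGS flags is set;
+           * printing here is for illustration only and is not safe in
+           * every context a callback may run in.
+           */
+          if (regs)
+                  pr_debug("traced %ps, regs ip %lx\n",
+                           (void *)ip, instruction_pointer(regs));
+  }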
+
+
+The ftrace FLAGS
+================
+
+The ftrace_ops flags are all defined and documented in include/linux/ftrace.h.
+Some of the flags are used for internal infrastructure of ftrace, but the
+ones that users should be aware of are the following:
+
+FTRACE_OPS_FL_SAVE_REGS
+ If the callback requires reading or modifying the pt_regs
+ passed to the callback, then it must set this flag. Registering
+ an ftrace_ops with this flag set on an architecture that does not
+ support passing of pt_regs to the callback will fail.
+
+FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
+ Similar to SAVE_REGS but the registering of a
+ ftrace_ops on an architecture that does not support passing of regs
+ will not fail with this flag set. But the callback must check if
+ regs is NULL or not to determine if the architecture supports it.
+
+FTRACE_OPS_FL_RECURSION_SAFE
+ By default, a wrapper is added around the callback to
+ make sure that recursion of the function does not occur. That is,
+ if a function that is called as a result of the callback's execution
+ is also traced, ftrace will prevent the callback from being called
+ again. But this wrapper adds some overhead, and if the callback is
+ safe from recursion, it can set this flag to disable the ftrace
+ protection.
+
+ Note, if this flag is set, and recursion does occur, it could cause
+ the system to crash, and possibly reboot via a triple fault.
+
+ It is OK if another callback traces a function that is called by a
+ callback that is marked recursion safe. Recursion safe callbacks
+ must never trace any function that is called by the callback
+ itself or any nested function that those functions call.
+
+ If this flag is set, it is possible that the callback will also
+ be called with preemption enabled (when CONFIG_PREEMPT is set),
+ but this is not guaranteed.
+
+FTRACE_OPS_FL_IPMODIFY
+ Requires FTRACE_OPS_FL_SAVE_REGS set. If the callback is to "hijack"
+ the traced function (have another function called instead of the
+ traced function), it requires setting this flag. This is what live
+ kernel patching uses. Without this flag, pt_regs->ip cannot be
+ modified.
+
+ Note, only one ftrace_ops with FTRACE_OPS_FL_IPMODIFY set may be
+ registered to any given function at a time.
+
+FTRACE_OPS_FL_RCU
+ If this is set, then the callback will only be called by functions
+ where RCU is "watching". This is required if the callback function
+ performs any rcu_read_lock() operation.
+
+ RCU stops watching when the system goes idle, the time when a CPU
+ is taken down and comes back online, and when entering from kernel
+ to user space and back to kernel space. During these transitions,
+ a callback may be executed and RCU synchronization will not protect
+ it.
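+
+Putting some of these together, an ops whose callback takes rcu_read_lock()
+and wants registers whenever the architecture can provide them could be set
+up roughly as follows (the flag combination is illustrative, not a
+recommendation):
+
+.. code-block:: c
+
+  static struct ftrace_ops ops = {
+          .func  = my_callback_func,
+          .flags = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED |
+                   FTRACE_OPS_FL_RCU,
+  };
+
+With SAVE_REGS_IF_SUPPORTED the callback must still check @regs for NULL, as
+described above.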
+
+
+Filtering which functions to trace
+==================================
+
+If a callback is only to be called from specific functions, a filter must be
+set up. The filters are added by name, or by ip if it is known.
+
+.. code-block:: c
+
+  int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+                        int len, int reset);
+
+@ops
+ The ops to set the filter with.
+
+@buf
+ The string that holds the function filter text.
+
+@len
+ The length of the string.
+
+@reset
+ Non-zero to reset all filters before applying this filter.
+
+Filters denote which functions should be enabled when tracing is enabled.
+If @buf is NULL and reset is set, all functions will be enabled for tracing.
+
+The @buf can also be a glob expression to enable all functions that
+match a specific pattern.
+
+See Filter Commands in :file:`Documentation/trace/ftrace.txt`.
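+
+For example, the call below selects every function whose name begins with a
+given prefix (the pattern is only illustrative):
+
+.. code-block:: c
+
+  ret = ftrace_set_filter(&ops, "vfs_*", strlen("vfs_*"), 1);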
+
+To just trace the schedule function:
+
+.. code-block:: c
+
+  ret = ftrace_set_filter(&ops, "schedule", strlen("schedule"), 0);
+
+To add more functions, call ftrace_set_filter() more than once with the
+@reset parameter set to zero. To remove the current filter set and replace it
+with new functions defined by @buf, have @reset be non-zero.
+
+To remove all the filtered functions and trace all functions:
+
+.. code-block:: c
+
+  ret = ftrace_set_filter(&ops, NULL, 0, 1);
+
+
+Sometimes more than one function has the same name. To trace just a specific
+function in this case, ftrace_set_filter_ip() can be used.
+
+.. code-block:: c
+
+  ret = ftrace_set_filter_ip(&ops, ip, 0, 0);
+
+Note that the ip must be the address where the call to fentry or mcount is
+located in the function. This function is used by perf and kprobes, which
+get the ip address from the user (usually using debug info from the kernel).
+
+If a glob is used to set the filter, functions can be added to a "notrace"
+list that will prevent those functions from calling the callback.
+The "notrace" list takes precedence over the "filter" list. If the
+two lists are non-empty and contain the same functions, the callback will not
+be called by any function.
+
+An empty "notrace" list means to allow all functions defined by the filter
+to be traced.
+
+.. code-block:: c
+
+  int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+                         int len, int reset);
+
+This takes the same parameters as ftrace_set_filter() but will add the
+functions it finds to not be traced. This is a separate list from the
+filter list, and this function does not modify the filter list.
+
+A non-zero @reset will clear the "notrace" list before adding functions
+that match @buf to it.
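+
+As a sketch (the patterns are only illustrative), a filter/notrace pair
+might look like:
+
+.. code-block:: c
+
+  /* call the callback from all functions matching "wake_up*" ... */
+  ret = ftrace_set_filter(&ops, "wake_up*", strlen("wake_up*"), 1);
+
+  /* ... except those matching "wake_up_new*" */
+  ret = ftrace_set_notrace(&ops, "wake_up_new*", strlen("wake_up_new*"), 1);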
+
+Clearing the "notrace" list is the same as clearing the filter list:
+
+.. code-block:: c
+
+  ret = ftrace_set_notrace(&ops, NULL, 0, 1);
+
+The filter and notrace lists may be changed at any time. If only a set of
+functions should call the callback, it is best to set the filters before
+registering the callback. But the changes may also happen after the callback
+has been registered.
+
+If a filter is already in place, @reset is non-zero, and @buf contains a
+glob that matches functions, the switch happens during the
+ftrace_set_filter() call itself. At no time will all functions call the
+callback.
+
+.. code-block:: c
+
+ ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+
+ register_ftrace_function(&ops);
+
+ msleep(10);
+
+ ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 1);
+
+is not the same as:
+
+.. code-block:: c
+
+ ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+
+ register_ftrace_function(&ops);
+
+ msleep(10);
+
+ ftrace_set_filter(&ops, NULL, 0, 1);
+
+ ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 0);
+
+The latter has a short window in which all functions will call the
+callback, between the time of the reset and the time the new filter
+is set.
diff --git a/Documentation/trace/intel_th.txt b/Documentation/trace/intel_th.txt
index f92070e7dde0..7a57165c2492 100644
--- a/Documentation/trace/intel_th.txt
+++ b/Documentation/trace/intel_th.txt
@@ -37,7 +37,7 @@ description is at Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth.
STH registers an stm class device, through which it provides interface
to userspace and kernelspace software trace sources. See
-Documentation/tracing/stm.txt for more information on that.
+Documentation/trace/stm.txt for more information on that.
MSU can be configured to collect trace data into a system memory
buffer, which can later on be read from its device nodes via read() or
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index fbc397d17e98..441a4b9b666f 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -773,7 +773,7 @@ host:
# cat /dev/usb/lp0
More advanced testing can be done with the prn_example
-described in Documentation/usb/gadget-printer.txt.
+described in Documentation/usb/gadget_printer.txt.
20. UAC1 function (virtual ALSA card, using u_audio API)
diff --git a/Documentation/watchdog/hpwdt.txt b/Documentation/watchdog/hpwdt.txt
index 7a9f635d0258..6d866c537127 100644
--- a/Documentation/watchdog/hpwdt.txt
+++ b/Documentation/watchdog/hpwdt.txt
@@ -15,7 +15,7 @@ Last reviewed: 05/20/2016
Watchdog functionality is enabled like any other common watchdog driver. That
is, an application needs to be started that kicks off the watchdog timer. A
- basic application exists in the Documentation/watchdog/src directory called
+ basic application exists in tools/testing/selftests/watchdog/ named
watchdog-test.c. Simply compile the C file and kick it off. If the system
gets into a bad state and hangs, the HPE ProLiant iLO timer register will
not be updated in a timely fashion and a hardware system reset (also known as
diff --git a/Documentation/watchdog/pcwd-watchdog.txt b/Documentation/watchdog/pcwd-watchdog.txt
index 4f68052395c0..b8e60a441a43 100644
--- a/Documentation/watchdog/pcwd-watchdog.txt
+++ b/Documentation/watchdog/pcwd-watchdog.txt
@@ -25,7 +25,7 @@ Last reviewed: 10/05/2007
If you want to write a program to be compatible with the PC Watchdog
driver, simply use of modify the watchdog test program:
- Documentation/watchdog/src/watchdog-test.c
+ tools/testing/selftests/watchdog/watchdog-test.c
Other IOCTL functions include:
diff --git a/MAINTAINERS b/MAINTAINERS
index 2f4e462aa4a2..026644adfd8f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4234,7 +4234,7 @@ S: Maintained
F: drivers/dma/
F: include/linux/dmaengine.h
F: Documentation/devicetree/bindings/dma/
-F: Documentation/dmaengine/
+F: Documentation/driver-api/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
DMA MAPPING HELPERS
@@ -4906,13 +4906,19 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/highbank*
-EDAC-CAVIUM
+EDAC-CAVIUM OCTEON
M: Ralf Baechle <ralf@linux-mips.org>
M: David Daney <david.daney@cavium.com>
L: linux-edac@vger.kernel.org
L: linux-mips@linux-mips.org
S: Supported
F: drivers/edac/octeon_edac*
+
+EDAC-CAVIUM THUNDERX
+M: David Daney <david.daney@cavium.com>
+M: Jan Glauber <jglauber@cavium.com>
+L: linux-edac@vger.kernel.org
+S: Supported
F: drivers/edac/thunderx_edac*
EDAC-CORE
@@ -5213,8 +5219,7 @@ F: fs/ext4/
Extended Verification Module (EVM)
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-L: linux-ima-devel@lists.sourceforge.net
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
S: Supported
F: security/integrity/evm/
@@ -6841,9 +6846,7 @@ L: linux-crypto@vger.kernel.org
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
-L: linux-ima-devel@lists.sourceforge.net
-L: linux-ima-user@lists.sourceforge.net
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
S: Supported
F: security/integrity/ima/
@@ -7626,8 +7629,7 @@ F: kernel/kexec*
KEYS-ENCRYPTED
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-M: David Safford <safford@us.ibm.com>
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
@@ -7635,9 +7637,8 @@ F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/
KEYS-TRUSTED
-M: David Safford <safford@us.ibm.com>
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
@@ -7745,6 +7746,11 @@ S: Maintained
F: Documentation/scsi/53c700.txt
F: drivers/scsi/53c700*
+LEAKING_ADDRESSES
+M: Tobin C. Harding <me@tobin.cc>
+S: Maintained
+F: scripts/leaking_addresses.pl
+
LED SUBSYSTEM
M: Richard Purdie <rpurdie@rpsys.net>
M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
@@ -10336,7 +10342,6 @@ F: drivers/pci/host/vmd.c
PCI DRIVER FOR MICROSEMI SWITCHTEC
M: Kurt Schwemmer <kurt.schwemmer@microsemi.com>
-M: Stephen Bates <stephen.bates@microsemi.com>
M: Logan Gunthorpe <logang@deltatee.com>
L: linux-pci@vger.kernel.org
S: Maintained
@@ -10401,6 +10406,7 @@ F: drivers/pci/dwc/*keystone*
PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com>
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-pci@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
S: Supported
@@ -10452,6 +10458,15 @@ F: include/linux/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+L: linux-pci@vger.kernel.org
+Q: http://patchwork.ozlabs.org/project/linux-pci/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
+S: Supported
+F: drivers/pci/host/
+F: drivers/pci/dwc/
+
PCIE DRIVER FOR AXIS ARTPEC
M: Niklas Cassel <niklas.cassel@axis.com>
M: Jesper Nilsson <jesper.nilsson@axis.com>
@@ -10471,7 +10486,6 @@ F: drivers/pci/host/pci-thunder-*
PCIE DRIVER FOR HISILICON
M: Zhou Wang <wangzhou1@hisilicon.com>
-M: Gabriele Paoloni <gabriele.paoloni@huawei.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@@ -12048,6 +12062,12 @@ L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-spear.c
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) TI OMAP DRIVER
+M: Kishon Vijay Abraham I <kishon@ti.com>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/sdhci-omap.c
+
SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
M: Scott Bauer <scott.bauer@intel.com>
M: Jonathan Derrick <jonathan.derrick@intel.com>
@@ -13598,23 +13618,14 @@ F: drivers/platform/x86/toshiba-wmi.c
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
-M: Marcel Selhorst <tpmdd@selhorst.net>
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
-W: http://tpmdd.sourceforge.net
-L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
-Q: https://patchwork.kernel.org/project/tpmdd-devel/list/
+L: linux-integrity@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.infradead.org/users/jjs/linux-tpmdd.git
S: Maintained
F: drivers/char/tpm/
-TPM IBM_VTPM DEVICE DRIVER
-M: Ashley Lai <ashleydlai@gmail.com>
-W: http://tpmdd.sourceforge.net
-L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
-S: Maintained
-F: drivers/char/tpm/tpm_ibmvtpm*
-
TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Ingo Molnar <mingo@redhat.com>
diff --git a/Makefile b/Makefile
index bee2033e7d1d..4b0424f53823 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -1459,7 +1459,8 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
-DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs linkcheckdocs
+DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs \
+ linkcheckdocs dochelp refcheckdocs
PHONY += $(DOC_TARGETS)
$(DOC_TARGETS): scripts_basic FORCE
$(Q)$(MAKE) $(build)=Documentation $@
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 948c648fea00..0fcd82f01388 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
+ bad = get_user(val, &((u16 *)addr)[i]);
else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ mm_segment_t fs;
+
+ if (!user_mode(regs)) {
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 2d45d18b1a5e..6b7df6fd2448 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -29,6 +29,7 @@
#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/at24.h>
#include <linux/smc91x.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/leds.h>
@@ -52,7 +53,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/mfd/da903x.h>
-#include <linux/platform_data/sht15.h>
#include "devices.h"
#include "generic.h"
@@ -137,17 +137,18 @@ static unsigned long sg2_im2_unified_pin_config[] __initdata = {
GPIO10_GPIO, /* large basic connector pin 23 */
};
-static struct sht15_platform_data platform_data_sht15 = {
- .gpio_data = 100,
- .gpio_sck = 98,
+static struct gpiod_lookup_table sht15_gpiod_table = {
+ .dev_id = "sht15",
+ .table = {
+ /* FIXME: should this have |GPIO_OPEN_DRAIN set? */
+ GPIO_LOOKUP("gpio-pxa", 100, "data", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", 98, "clk", GPIO_ACTIVE_HIGH),
+ },
};
static struct platform_device sht15 = {
.name = "sht15",
.id = -1,
- .dev = {
- .platform_data = &platform_data_sht15,
- },
};
static struct regulator_consumer_supply stargate2_sensor_3_con[] = {
@@ -608,6 +609,7 @@ static void __init imote2_init(void)
imote2_stargate2_init();
+ gpiod_add_lookup_table(&sht15_gpiod_table);
platform_add_devices(imote2_devices, ARRAY_SIZE(imote2_devices));
i2c_register_board_info(0, imote2_i2c_board_info,
@@ -988,6 +990,7 @@ static void __init stargate2_init(void)
imote2_stargate2_init();
+ gpiod_add_lookup_table(&sht15_gpiod_table);
platform_add_devices(ARRAY_AND_SIZE(stargate2_devices));
i2c_register_board_info(0, ARRAY_AND_SIZE(stargate2_i2c_board_info));
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index b99a27372965..26396ef53bde 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -682,8 +682,7 @@
};
mmc0: mmc@11230000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11230000 0 0x1000>;
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_0>,
@@ -693,8 +692,7 @@
};
mmc1: mmc@11240000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11240000 0 0x1000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_1>,
@@ -704,8 +702,7 @@
};
mmc2: mmc@11250000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11250000 0 0x1000>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_2>,
@@ -715,8 +712,7 @@
};
mmc3: mmc@11260000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11260000 0 0x1000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_3>,
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index df7acea3747a..4674f1efbe7a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
+ uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2;
uart_port.line = 0;
@@ -653,6 +654,10 @@ static int __init ar7_register_devices(void)
u32 val;
int res;
+ res = ar7_gpio_init();
+ if (res)
+ pr_warn("unable to register gpios: %d\n", res);
+
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index 4fd83336131a..dd53987a690f 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,8 +246,6 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
-
- ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 406072e26752..87dcac2447c8 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -591,11 +591,11 @@ void __init bmips_cpu_setup(void)
/* Flush and enable RAC */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7c62967d672c..59247af5fd45 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -646,6 +646,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
hnow_r = hpte_new_to_old_r(hnow_r);
}
+
+ /*
+ * If the HPT is being resized, don't update the HPTE,
+ * instead let the guest retry after the resize operation is complete.
+ * The synchronization for hpte_setup_done test vs. set is provided
+ * by the HPTE lock.
+ */
+ if (!kvm->arch.hpte_setup_done)
+ goto out_unlock;
+
if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 73bf1ebfa78f..8d43cf205d34 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2705,11 +2705,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
+ * If the hpte_setup_done flag has been cleared, don't go into the
+ * guest because that means a HPT resize operation is in progress.
*/
local_irq_disable();
hard_irq_disable();
if (lazy_irq_pending() || need_resched() ||
- recheck_signals(&core_info)) {
+ recheck_signals(&core_info) ||
+ (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
local_irq_enable();
vc->vcore_state = VCORE_INACTIVE;
/* Unlock all except the primary vcore */
@@ -3078,7 +3081,7 @@ out:
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
- int n_ceded, i;
+ int n_ceded, i, r;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v;
@@ -3132,6 +3135,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) {
+ /* See if the HPT and VRMA are ready to go */
+ if (!kvm_is_radix(vcpu->kvm) &&
+ !vcpu->kvm->arch.hpte_setup_done) {
+ spin_unlock(&vc->lock);
+ r = kvmppc_hv_setup_htab_rma(vcpu);
+ spin_lock(&vc->lock);
+ if (r) {
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ break;
+ }
+ }
+
if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
kvmppc_vcore_end_preempt(vc);
@@ -3249,13 +3266,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb();
- /* On the first time here, set up HTAB and VRMA */
- if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
- r = kvmppc_hv_setup_htab_rma(vcpu);
- if (r)
- goto out;
- }
-
flush_all_to_thread(current);
/* Save userspace EBB and other register values */
@@ -3303,7 +3313,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
mtspr(SPRN_VRSAVE, user_vrsave);
- out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
return r;
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 93b945597ecf..7cfba738f104 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -157,8 +157,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 8fe6338bcc84..16c4ccb1f154 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -155,8 +155,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index c1a125e47ff3..3a091cea36c5 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -253,7 +253,7 @@ extern int force_personality32;
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
- (TASK_SIZE / 3 * 2))
+ (DEFAULT_MAP_WINDOW / 3 * 2))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 236999c54edc..c60922a66385 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -22,7 +22,7 @@ obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
-obj-y += aperfmperf.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 957813e0180d..0ee83321a313 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy)
s64 time_delta = ktime_ms_delta(now, s->time);
unsigned long flags;
+ /* Don't bother re-computing within the cache threshold time. */
+ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+ return;
+
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
@@ -70,7 +74,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
unsigned int arch_freq_get_on_cpu(int cpu)
{
- s64 time_delta;
unsigned int khz;
if (!cpu_khz)
@@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
- /* Don't bother re-computing within the cache threshold time. */
- time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
- khz = per_cpu(samples.khz, cpu);
- if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
- return khz;
-
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
khz = per_cpu(samples.khz, cpu);
if (khz)
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 4378a729b933..6b7e17bf0b71 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -78,11 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = arch_freq_get_on_cpu(cpu);
+ unsigned int freq = cpufreq_quick_get(cpu);
if (!freq)
- freq = cpufreq_quick_get(cpu);
- if (!freq)
freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
freq / 1000, (freq % 1000));
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 6107ee1cb8d5..014cb2fc47ff 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -92,8 +92,6 @@ static const __initdata struct idt_data def_idts[] = {
INTG(X86_TRAP_DF, double_fault),
#endif
INTG(X86_TRAP_DB, debug),
- INTG(X86_TRAP_NMI, nmi),
- INTG(X86_TRAP_BP, int3),
#ifdef CONFIG_X86_MCE
INTG(X86_TRAP_MC, &machine_check),
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ad59edd84de7..65a0ccdc3050 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -194,6 +194,12 @@ static void smp_callin(void)
smp_store_cpu_info(cpuid);
/*
+ * The topology information must be up to date before
+ * calibrate_delay() and notify_cpu_starting().
+ */
+ set_cpu_sibling_map(raw_smp_processor_id());
+
+ /*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
@@ -203,11 +209,6 @@ static void smp_callin(void)
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
- /*
- * This must be done before setting cpu_online_mask
- * or calling notify_cpu_starting.
- */
- set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 67db4f43309e..5a6b8f809792 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -209,9 +209,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
if (fixup_exception(regs, trapnr))
return 0;
- if (fixup_bug(regs, trapnr))
- return 0;
-
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
@@ -292,6 +289,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ /*
+ * WARN*()s end up here; fix them up before we call the
+ * notifier chain.
+ */
+ if (!user_mode(regs) && fixup_bug(regs, trapnr))
+ return;
+
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
cond_local_irq_enable(regs);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 796d96bb0821..ad2b925a808e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1346,12 +1346,10 @@ void __init tsc_init(void)
unsigned long calibrate_delay_is_known(void)
{
int sibling, cpu = smp_processor_id();
- struct cpumask *mask = topology_core_cpumask(cpu);
+ int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
+ const struct cpumask *mask = topology_core_cpumask(cpu);
- if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
- return 0;
-
- if (!mask)
+ if (tsc_disabled || !constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index b95007e7c1b3..a3f973b2c97a 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -279,7 +279,7 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
if (!stack_access_ok(state, addr, sizeof(long)))
return false;
- *val = READ_ONCE_TASK_STACK(state->task, *(unsigned long *)addr);
+ *val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
return true;
}
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 16c5f37933a2..0286327e65fa 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -40,7 +40,7 @@ static char sme_cmdline_off[] __initdata = "off";
* section is later cleared.
*/
u64 sme_me_mask __section(.data) = 0;
-EXPORT_SYMBOL_GPL(sme_me_mask);
+EXPORT_SYMBOL(sme_me_mask);
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 350f7096baac..7913b6921959 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15) {
+ if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1ce37ae0ce56..0a083342ec8c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
- u8 *iv = req->iv;
+ u8 *iv = pctx->idata;
int err;
cryptlen -= authsize;
@@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
if (req->src != req->dst)
dst = pctx->dst;
+ memcpy(iv, req->iv, 16);
+
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6804ddab3052..8082871b409a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
+static bool acpi_sleep_no_lps0;
+
+static int __init init_no_lps0(const struct dmi_system_id *d)
+{
+ acpi_sleep_no_lps0 = true;
+ return 0;
+}
+
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{
.callback = init_old_suspend_ordering,
@@ -343,6 +351,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=196907
+ * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
+ * S0 Idle firmware interface.
+ */
+ {
+ .callback = init_no_lps0,
+ .ident = "Dell XPS13 9360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+ },
+ },
{},
};
@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
+#define acpi_sleep_no_lps0 (false)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
@@ -863,6 +885,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle)
return 0;
+ if (acpi_sleep_no_lps0) {
+ acpi_handle_info(adev->handle,
+ "Low Power S0 Idle interface disabled\n");
+ return 0;
+ }
+
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
return 0;
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0368fd7b3a41..3a1535d812d8 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,6 +6,7 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
+ select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -37,3 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
+
+config REGMAP_HWSPINLOCK
+ bool
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 2a4435d76028..8641183cac2f 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -157,6 +157,8 @@ struct regmap {
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index edd9a839d004..c7150dd264d5 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -102,7 +102,7 @@ static int regmap_spi_read(void *context,
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
-static struct regmap_bus regmap_spi = {
+static const struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.async_write = regmap_spi_async_write,
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 4a36e415e938..0bfb8ed244d5 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -83,7 +83,7 @@ static int regmap_spmi_base_write(void *context, const void *data,
count - 1);
}
-static struct regmap_bus regmap_spmi_base = {
+static const struct regmap_bus regmap_spmi_base = {
.read = regmap_spmi_base_read,
.write = regmap_spmi_base_write,
.gather_write = regmap_spmi_base_gather_write,
@@ -203,7 +203,7 @@ static int regmap_spmi_ext_write(void *context, const void *data,
count - 2);
}
-static struct regmap_bus regmap_spmi_ext = {
+static const struct regmap_bus regmap_spmi_ext = {
.read = regmap_spmi_ext_read,
.write = regmap_spmi_ext_write,
.gather_write = regmap_spmi_ext_gather_write,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index b9a779a4a739..8d516a9bfc01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
+#include <linux/hwspinlock.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -413,6 +414,51 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
+#ifdef REGMAP_HWSPINLOCK
+static void regmap_lock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+ &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+#endif
+
static void regmap_lock_mutex(void *__map)
{
struct regmap *map = __map;
@@ -627,6 +673,34 @@ struct regmap *__regmap_init(struct device *dev,
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
+ } else if (config->hwlock_id) {
+#ifdef REGMAP_HWSPINLOCK
+ map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+ if (!map->hwlock) {
+ ret = -ENXIO;
+ goto err_map;
+ }
+
+ switch (config->hwlock_mode) {
+ case HWLOCK_IRQSTATE:
+ map->lock = regmap_lock_hwlock_irqsave;
+ map->unlock = regmap_unlock_hwlock_irqrestore;
+ break;
+ case HWLOCK_IRQ:
+ map->lock = regmap_lock_hwlock_irq;
+ map->unlock = regmap_unlock_hwlock_irq;
+ break;
+ default:
+ map->lock = regmap_lock_hwlock;
+ map->unlock = regmap_unlock_hwlock;
+ break;
+ }
+
+ map->lock_arg = map;
+#else
+ ret = -EINVAL;
+ goto err_map;
+#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -729,7 +803,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_2_6_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -739,7 +813,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_4_12_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -749,7 +823,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_7_9_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -759,7 +833,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_10_14_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -779,13 +853,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (reg_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_reg = regmap_format_24;
break;
@@ -801,7 +875,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -818,13 +892,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
default:
- goto err_map;
+ goto err_hwlock;
}
if (val_endian == REGMAP_ENDIAN_NATIVE)
@@ -853,12 +927,12 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (val_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_val = regmap_format_24;
map->format.parse_val = regmap_parse_24;
break;
@@ -879,7 +953,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#ifdef CONFIG_64BIT
@@ -900,7 +974,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
@@ -909,18 +983,18 @@ struct regmap *__regmap_init(struct device *dev,
if (map->format.format_write) {
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
- goto err_map;
+ goto err_hwlock;
map->use_single_write = true;
}
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
- goto err_map;
+ goto err_hwlock;
map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
- goto err_map;
+ goto err_hwlock;
}
if (map->format.format_write) {
@@ -1041,6 +1115,9 @@ err_regcache:
err_range:
regmap_range_exit(map);
kfree(map->work_buf);
+err_hwlock:
+ if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
err_map:
kfree(map);
err:
@@ -1228,6 +1305,8 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
+ if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b640ad8a6d20..adc877dfef5c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2692,7 +2692,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2827,7 +2827,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail_stat_request;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 610638a80383..461bf0b8a094 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ if (in_size < 6 ||
+ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EINVAL;
+ }
+
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 86f38d239476..83a77a445538 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -20,44 +20,48 @@
#include <linux/device.h>
#include "tpm.h"
-#define READ_PUBEK_RESULT_SIZE 314
+struct tpm_readpubek_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12];
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+} __packed;
+
#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
#define TPM_ORD_READPUBEK 124
-static const struct tpm_input_header tpm_readpubek_header = {
- .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
- .length = cpu_to_be32(30),
- .ordinal = cpu_to_be32(TPM_ORD_READPUBEK)
-};
+
static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 *data;
- struct tpm_cmd_t tpm_cmd;
- ssize_t err;
- int i, rc;
+ struct tpm_buf tpm_buf;
+ struct tpm_readpubek_out *out;
+ ssize_t rc;
+ int i;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
+ char anti_replay[20];
- memset(&tpm_cmd, 0, sizeof(tpm_cmd));
-
- tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
- READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
- "attempting to read the PUBEK");
- if (err)
- goto out;
-
- /*
- ignore header 10 bytes
- algorithm 32 bits (1 == RSA )
- encscheme 16 bits
- sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
- keylenbytes 32 bits
- 256 byte modulus
- ignore checksum 20 bytes
- */
- data = tpm_cmd.params.readpubek_out_buffer;
+ memset(&anti_replay, 0, sizeof(anti_replay));
+
+ rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
+ if (rc)
+ return rc;
+
+ tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+ rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+ READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
+ "attempting to read the PUBEK");
+ if (rc) {
+ tpm_buf_destroy(&tpm_buf);
+ return 0;
+ }
+
+ out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\n"
@@ -68,21 +72,26 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
"%02X %02X %02X %02X\n"
"Modulus length: %d\n"
"Modulus:\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5],
- data[6], data[7],
- data[12], data[13], data[14], data[15],
- data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23],
- be32_to_cpu(*((__be32 *) (data + 24))));
+ out->algorithm[0], out->algorithm[1], out->algorithm[2],
+ out->algorithm[3],
+ out->encscheme[0], out->encscheme[1],
+ out->sigscheme[0], out->sigscheme[1],
+ out->parameters[0], out->parameters[1],
+ out->parameters[2], out->parameters[3],
+ out->parameters[4], out->parameters[5],
+ out->parameters[6], out->parameters[7],
+ out->parameters[8], out->parameters[9],
+ out->parameters[10], out->parameters[11],
+ be32_to_cpu(out->keysize));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 28]);
+ str += sprintf(str, "%02X ", out->modulus[i]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
-out:
+
rc = str - buf;
+ tpm_buf_destroy(&tpm_buf);
return rc;
}
static DEVICE_ATTR_RO(pubek);
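pubek_show() now overlays a packed struct on the response payload, 10 bytes into the buffer (just past the TPM output header), instead of indexing raw byte offsets. A compact standalone illustration of that overlay-and-convert pattern, using a made-up two-field layout rather than the real TPM_PUBKEY structure:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() plays the role of be32_to_cpu() */

/* Made-up payload layout: a 4-byte algorithm field followed by a
 * big-endian key size, kept in wire order inside the struct. */
struct demo_out {
	uint8_t  algorithm[4];
	uint32_t keysize_be;
} __attribute__((packed));

int main(void)
{
	/* 10-byte response header followed by the payload bytes. */
	uint8_t resp[10 + sizeof(struct demo_out)] = {
		[10] = 0x00, 0x00, 0x00, 0x01,	/* algorithm */
		       0x00, 0x00, 0x01, 0x00,	/* key size: 256 */
	};
	const struct demo_out *out = (const void *)&resp[10];

	/* Fields keep their wire order; convert explicitly when reading. */
	printf("Algorithm: %02X %02X %02X %02X, Modulus length: %u\n",
	       out->algorithm[0], out->algorithm[1],
	       out->algorithm[2], out->algorithm[3],
	       ntohl(out->keysize_be));
	return 0;
}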
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2d5466a72e40..528cffbd49d3 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -345,17 +345,6 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
-struct tpm_readpubek_params_out {
- u8 algorithm[4];
- u8 encscheme[2];
- u8 sigscheme[2];
- __be32 paramsize;
- u8 parameters[12]; /*assuming RSA*/
- __be32 keysize;
- u8 modulus[256];
- u8 checksum[20];
-} __packed;
-
typedef union {
struct tpm_input_header in;
struct tpm_output_header out;
@@ -385,8 +374,6 @@ struct tpm_getrandom_in {
} __packed;
typedef union {
- struct tpm_readpubek_params_out readpubek_out;
- u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
struct tpm_pcrread_in pcrread_in;
struct tpm_pcrread_out pcrread_out;
struct tpm_getrandom_in getrandom_in;
@@ -557,7 +544,7 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline inline u32 tpm2_rc_value(u32 rc)
+static inline u32 tpm2_rc_value(u32 rc)
{
return (rc & BIT(7)) ? rc & 0xff : rc;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index e1a41b788f08..f40d20671a78 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -834,72 +834,43 @@ static const struct tpm_input_header tpm2_selftest_header = {
};
/**
- * tpm2_continue_selftest() - start a self test
- *
- * @chip: TPM chip to use
- * @full: test all commands instead of testing only those that were not
- * previously tested.
- *
- * Return: Same as with tpm_transmit_cmd with exception of RC_TESTING.
- */
-static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
-{
- int rc;
- struct tpm2_cmd cmd;
-
- cmd.header.in = tpm2_selftest_header;
- cmd.params.selftest_in.full_test = full;
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, 0,
- "continue selftest");
-
- /* At least some prototype chips seem to give RC_TESTING error
- * immediately. This is a workaround for that.
- */
- if (rc == TPM2_RC_TESTING) {
- dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
- rc = 0;
- }
-
- return rc;
-}
-
-/**
- * tpm2_do_selftest() - run a full self test
+ * tpm2_do_selftest() - ensure that all self tests have passed
*
* @chip: TPM chip to use
*
* Return: Same as with tpm_transmit_cmd.
*
- * During the self test TPM2 commands return with the error code RC_TESTING.
- * Waiting is done by issuing PCR read until it executes successfully.
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests were successful. Or it can choose to run the tests
+ * asynchronously and return RC_TESTING immediately while the self tests still
+ * execute in the background. This function handles both cases and waits until
+ * all tests have completed.
*/
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int loops;
- unsigned int delay_msec = 100;
- unsigned long duration;
- int i;
-
- duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST);
+ unsigned int delay_msec = 20;
+ long duration;
+ struct tpm2_cmd cmd;
- loops = jiffies_to_msecs(duration) / delay_msec;
+ duration = jiffies_to_msecs(
+ tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- rc = tpm2_start_selftest(chip, true);
- if (rc)
- return rc;
+ while (duration > 0) {
+ cmd.header.in = tpm2_selftest_header;
+ cmd.params.selftest_in.full_test = 0;
- for (i = 0; i < loops; i++) {
- /* Attempt to read a PCR value */
- rc = tpm2_pcr_read(chip, 0, NULL);
- if (rc < 0)
- break;
+ rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ 0, 0, "continue selftest");
if (rc != TPM2_RC_TESTING)
break;
tpm_msleep(delay_msec);
+ duration -= delay_msec;
+
+ /* wait longer the next round */
+ delay_msec *= 2;
}
return rc;
@@ -1009,7 +980,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
{
struct tpm_buf buf;
u32 nr_commands;
- u32 *attrs;
+ __be32 *attrs;
u32 cc;
int i;
int rc;
@@ -1049,7 +1020,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->nr_commands = nr_commands;
- attrs = (u32 *)&buf.data[TPM_HEADER_SIZE + 9];
+ attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
for (i = 0; i < nr_commands; i++, attrs++) {
chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
cc = chip->cc_attrs_tbl[i] & 0xFFFF;
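The reworked tpm2_do_selftest() above keeps issuing the self-test command and backs off exponentially until the TPM stops answering RC_TESTING or the ordinal's duration budget is exhausted. A rough user-space sketch of that polling pattern, with a fake device_still_testing() standing in for the real command round trip (names are illustrative, not kernel APIs):

#include <stdio.h>

#define RC_SUCCESS 0
#define RC_TESTING 0x90a	/* TPM2_RC_TESTING */

/* Stand-in for the real transmit: pretend the TPM keeps reporting
 * RC_TESTING for the first few polls. */
static int device_still_testing(void)
{
	static int polls;
	return (++polls < 4) ? RC_TESTING : RC_SUCCESS;
}

/* Poll with exponential backoff until success, a real error, or the
 * total duration budget (in milliseconds) runs out. */
static int do_selftest(long duration_ms)
{
	int delay_ms = 20;
	int rc = RC_TESTING;

	while (duration_ms > 0) {
		rc = device_still_testing();
		if (rc != RC_TESTING)
			break;

		/* a real implementation would sleep for delay_ms here */
		duration_ms -= delay_ms;
		delay_ms *= 2;	/* wait longer the next round */
	}
	return rc;
}

int main(void)
{
	printf("selftest rc = 0x%x\n", do_selftest(2000));
	return 0;
}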
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index e2e059d8ffec..4e4014eabdb9 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -242,7 +242,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
struct tpm_space *space = &chip->work_space;
unsigned int nr_handles;
u32 attrs;
- u32 *handle;
+ __be32 *handle;
int i;
i = tpm2_find_cc(chip, cc);
@@ -252,7 +252,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
attrs = chip->cc_attrs_tbl[i];
nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
- handle = (u32 *)&cmd[TPM_HEADER_SIZE];
+ handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
for (i = 0; i < nr_handles; i++, handle++) {
if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
if (!tpm2_map_to_phandle(space, handle))
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8f0a98dea327..7b3c2a8aa9de 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -92,14 +92,9 @@ enum crb_status {
CRB_DRV_STS_COMPLETE = BIT(0),
};
-enum crb_flags {
- CRB_FL_ACPI_START = BIT(0),
- CRB_FL_CRB_START = BIT(1),
- CRB_FL_CRB_SMC_START = BIT(2),
-};
-
struct crb_priv {
- unsigned int flags;
+ u32 sm;
+ const char *hid;
void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
@@ -128,14 +123,16 @@ struct tpm2_crb_smc {
* Anyhow, we do not wait here, as a subsequent CMD_READY request
* will be handled correctly even if idle was not completed.
*
- * The function does nothing for devices with ACPI-start method.
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 always
*/
static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -174,14 +171,16 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
* The device should respond within TIMEOUT_C.
*
* The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 on success, -ETIME on timeout.
*/
static int __maybe_unused crb_cmd_ready(struct device *dev,
struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -325,13 +324,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* Make sure that cmd is populated before issuing start. */
wmb();
- if (priv->flags & CRB_FL_CRB_START)
+ /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+ * reports only ACPI start but in practice seems to require both ACPI
+ * start and CRB start, hence invoke the CRB start method if hid == MSFT0101.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+ (!strcmp(priv->hid, "MSFT0101")))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if (priv->flags & CRB_FL_ACPI_START)
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
rc = crb_do_acpi_start(chip);
- if (priv->flags & CRB_FL_CRB_SMC_START) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
@@ -345,7 +351,9 @@ static void crb_cancel(struct tpm_chip *chip)
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
+ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
}
@@ -458,7 +466,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if (!(priv->flags & CRB_FL_ACPI_START)) {
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
if (buf->control_address == io_res.start +
sizeof(*priv->regs_h))
priv->regs_h = priv->iobase;
@@ -552,18 +561,6 @@ static int crb_acpi_add(struct acpi_device *device)
if (!priv)
return -ENOMEM;
- /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
- * report only ACPI start but in practice seems to require both
- * ACPI start and CRB start.
- */
- if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
- !strcmp(acpi_device_hid(device), "MSFT0101"))
- priv->flags |= CRB_FL_CRB_START;
-
- if (sm == ACPI_TPM2_START_METHOD ||
- sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
- priv->flags |= CRB_FL_ACPI_START;
-
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
dev_err(dev,
@@ -574,9 +571,11 @@ static int crb_acpi_add(struct acpi_device *device)
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
- priv->flags |= CRB_FL_CRB_SMC_START;
}
+ priv->sm = sm;
+ priv->hid = acpi_device_hid(device);
+
rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7e55aa9ce680..e2d1055fb814 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -30,6 +30,7 @@
#include <linux/freezer.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/kernel.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -223,7 +224,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}
static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value)
+ const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
@@ -365,7 +366,7 @@ static struct pnp_driver tis_pnp_driver = {
},
};
-#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 63bc6c3b949e..fdde971bc810 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -252,7 +252,7 @@ out:
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -343,7 +343,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
@@ -445,7 +445,7 @@ static int probe_itpm(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
- u8 cmd_getticks[] = {
+ static const u8 cmd_getticks[] = {
0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0xf1
};
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e2212f021a02..6bbac319ff3b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value);
+ const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}
static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 88fe72ae967f..424ff2fde1f2 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -46,9 +46,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
-
- u8 tx_buf[4];
- u8 rx_buf[4];
+ u8 *iobuf;
};
static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}
static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *buffer, u8 direction)
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
- phy->tx_buf[0] = direction | (transfer_len - 1);
- phy->tx_buf[1] = 0xd4;
- phy->tx_buf[2] = addr >> 8;
- phy->tx_buf[3] = addr;
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
memset(&spi_xfer, 0, sizeof(spi_xfer));
- spi_xfer.tx_buf = phy->tx_buf;
- spi_xfer.rx_buf = phy->rx_buf;
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;
@@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->rx_buf[3] & 0x01) == 0) {
+ if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->tx_buf[0] = 0;
+ phy->iobuf[0] = 0;
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
- if (phy->rx_buf[0] & 0x01)
+ if (phy->iobuf[0] & 0x01)
break;
}
@@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;
- if (direction) {
+ if (in) {
spi_xfer.tx_buf = NULL;
- spi_xfer.rx_buf = buffer;
- } else {
- spi_xfer.tx_buf = buffer;
+ } else if (out) {
spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
}
spi_message_init(&m);
@@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
len -= transfer_len;
- buffer += transfer_len;
}
exit:
@@ -139,40 +141,51 @@ exit:
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
- return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
+ __le16 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le);
if (!rc)
- *result = le16_to_cpu(*result);
+ *result = le16_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
+ __le32 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le);
if (!rc)
- *result = le32_to_cpu(*result);
+ *result = le32_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
- value = cpu_to_le32(value);
- return data->phy_ops->write_bytes(data, addr, sizeof(u32),
- (u8 *)&value);
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le);
+
+ return rc;
}
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
@@ -194,6 +207,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->spi_device = dev;
+ phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
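The read16/read32 changes above stop converting a value in place and instead read the raw little-endian bytes into a correctly typed temporary before converting to CPU order. A small standalone illustration of that pattern (plain C, no kernel types; get_le32() is an ad-hoc helper, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* Assemble a CPU-order value from four little-endian bytes, the
 * portable equivalent of reading into a __le32 temporary and then
 * calling le32_to_cpu(). */
static uint32_t get_le32(const uint8_t *raw)
{
	return  (uint32_t)raw[0]        | ((uint32_t)raw[1] << 8) |
	       ((uint32_t)raw[2] << 16) | ((uint32_t)raw[3] << 24);
}

int main(void)
{
	/* Bytes as they would arrive from the device, LSB first. */
	uint8_t raw[4] = { 0xd1, 0x15, 0x1b, 0x00 };

	/* Works identically on little- and big-endian hosts, because the
	 * conversion is explicit instead of a cast of the caller's buffer. */
	printf("register value = 0x%08x\n", get_le32(raw));
	return 0;
}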
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ac2f30295efe..8b16ec595fa7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3434,9 +3434,14 @@ MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
+ const char *owner;
int err = -ENODEV;
int i;
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
if (!x86_match_cpu(amd64_cpuids))
return -ENODEV;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 480072139b7a..48193f5f3b56 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -53,7 +53,7 @@ static LIST_HEAD(mc_devices);
* Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
* apei/ghes and i7core_edac) being used at the same time.
*/
-static void const *edac_mc_owner;
+static const char *edac_mc_owner;
static struct bus_type mc_bus[EDAC_MAX_MCS];
@@ -701,6 +701,11 @@ unlock:
}
EXPORT_SYMBOL(edac_mc_find);
+const char *edac_get_owner(void)
+{
+ return edac_mc_owner;
+}
+EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 5357800e418d..4165e15995ad 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -128,6 +128,14 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
unsigned sz_pvt);
/**
+ * edac_get_owner - Return the owner's mod_name of EDAC MC
+ *
+ * Returns:
+ * Pointer to mod_name string when EDAC MC is owned. NULL otherwise.
+ */
+extern const char *edac_get_owner(void);
+
+/*
* edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
* global list and create sysfs entries associated with @mci structure.
*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 6f80eb65c26c..68b6ee18bea6 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -28,10 +28,19 @@ struct ghes_edac_pvt {
char msg[80];
};
-static LIST_HEAD(ghes_reglist);
-static DEFINE_MUTEX(ghes_edac_lock);
-static int ghes_edac_mc_num;
+static atomic_t ghes_init = ATOMIC_INIT(0);
+static struct ghes_edac_pvt *ghes_pvt;
+/*
+ * Sync with other, potentially concurrent callers of
+ * ghes_edac_report_mem_error(). We don't know what the
+ * "inventive" firmware would do.
+ */
+static DEFINE_SPINLOCK(ghes_lock);
+
+/* "ghes_edac.force_load=1" skips the platform check */
+static bool __read_mostly force_load;
+module_param(force_load, bool, 0);
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
@@ -169,18 +178,26 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = NULL;
+ struct ghes_edac_pvt *pvt = ghes_pvt;
+ unsigned long flags;
char *p;
u8 grain_bits;
- list_for_each_entry(pvt, &ghes_reglist, list) {
- if (ghes == pvt->ghes)
- break;
- }
if (!pvt) {
pr_err("Internal error: Can't find EDAC structure\n");
return;
}
+
+ /*
+ * We can do the locking below because GHES defers error processing
+ * from NMI to IRQ context. Whenever that changes, we'd at least
+ * know.
+ */
+ if (WARN_ON_ONCE(in_nmi()))
+ return;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+
mci = pvt->mci;
e = &mci->error_desc;
@@ -398,10 +415,17 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
(e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
grain_bits, e->syndrome, pvt->detail_location);
- /* Report the error via EDAC API */
edac_raw_mc_handle_error(type, mci, e);
+ spin_unlock_irqrestore(&ghes_lock, flags);
}
-EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+/*
+ * Known systems that are safe to enable this module.
+ */
+static struct acpi_platform_list plat_list[] = {
+ {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
+ { } /* End */
+};
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
@@ -409,8 +433,19 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
int rc, num_dimm = 0;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[1];
- struct ghes_edac_pvt *pvt;
struct ghes_edac_dimm_fill dimm_fill;
+ int idx;
+
+ /* Check if safe to enable on this system */
+ idx = acpi_match_platform_list(plat_list);
+ if (!force_load && idx < 0)
+ return 0;
+
+ /*
+ * We have only one logical memory controller to which all DIMMs belong.
+ */
+ if (atomic_inc_return(&ghes_init) > 1)
+ return 0;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -425,26 +460,17 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
layers[0].size = num_dimm;
layers[0].is_virt_csrow = true;
- /*
- * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
- * to avoid duplicated memory controller numbers
- */
- mutex_lock(&ghes_edac_lock);
- mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
- sizeof(*pvt));
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- mutex_unlock(&ghes_edac_lock);
return -ENOMEM;
}
- pvt = mci->pvt_info;
- memset(pvt, 0, sizeof(*pvt));
- list_add_tail(&pvt->list, &ghes_reglist);
- pvt->ghes = ghes;
- pvt->mci = mci;
- mci->pdev = dev;
+ ghes_pvt = mci->pvt_info;
+ ghes_pvt->ghes = ghes;
+ ghes_pvt->mci = mci;
+ mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
@@ -452,36 +478,23 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
- if (!ghes_edac_mc_num) {
- if (!fake) {
- pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
- pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
- pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
- pr_info("If you find incorrect reports, please contact your hardware vendor\n");
- pr_info("to correct its BIOS.\n");
- pr_info("This system has %d DIMM sockets.\n",
- num_dimm);
- } else {
- pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
- pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
- pr_info("work on such system. Use this driver with caution\n");
- }
+ if (fake) {
+ pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+ pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+ pr_info("work on such system. Use this driver with caution\n");
+ } else if (idx < 0) {
+ pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+ pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+ pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+ pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+ pr_info("to correct its BIOS.\n");
+ pr_info("This system has %d DIMM sockets.\n", num_dimm);
}
if (!fake) {
- /*
- * Fill DIMM info from DMI for the memory controller #0
- *
- * Keep it in blank for the other memory controllers, as
- * there's no reliable way to properly credit each DIMM to
- * the memory controller, as different BIOSes fill the
- * DMI bank location fields on different ways
- */
- if (!ghes_edac_mc_num) {
- dimm_fill.count = 0;
- dimm_fill.mci = mci;
- dmi_walk(ghes_edac_dmidecode, &dimm_fill);
- }
+ dimm_fill.count = 0;
+ dimm_fill.mci = mci;
+ dmi_walk(ghes_edac_dmidecode, &dimm_fill);
} else {
struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
mci->n_layers, 0, 0, 0);
@@ -497,28 +510,16 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- mutex_unlock(&ghes_edac_lock);
return -ENODEV;
}
-
- ghes_edac_mc_num++;
- mutex_unlock(&ghes_edac_lock);
return 0;
}
-EXPORT_SYMBOL_GPL(ghes_edac_register);
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt, *tmp;
-
- list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
- if (ghes == pvt->ghes) {
- mci = pvt->mci;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
- list_del(&pvt->list);
- }
- }
+
+ mci = ghes_pvt->mci;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
}
-EXPORT_SYMBOL_GPL(ghes_edac_unregister);
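The ghes_edac rework above collapses the per-GHES list into a single memory controller: only the first caller of ghes_edac_register() actually registers (guarded by an atomic counter), and concurrent error reports are serialized with a spinlock. A user-space analogue of that register-once / serialize-reporters shape, using C11 atomics and a pthread mutex in place of the kernel primitives (illustrative only, not the driver's code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int init_count;
static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered;

/* Only the first caller performs the real registration; later callers
 * return early, mirroring atomic_inc_return(&ghes_init) > 1. */
static int register_once(void)
{
	if (atomic_fetch_add(&init_count, 1) > 0)
		return 0;

	registered = 1;	/* stand-in for edac_mc_alloc()/edac_mc_add_mc() */
	return 0;
}

/* Error reports from several sources funnel through one lock so the
 * shared decoding state is never touched concurrently. */
static void report_error(const char *msg)
{
	pthread_mutex_lock(&report_lock);
	printf("edac: %s\n", msg);
	pthread_mutex_unlock(&report_lock);
}

int main(void)
{
	register_once();
	register_once();	/* second call is a no-op */
	report_error("corrected memory error, DIMM 0");
	printf("registered=%d, callers=%d\n", registered,
	       atomic_load(&init_count));
	return 0;
}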
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index c16c3b931b3d..8c5540160a23 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2159,8 +2159,13 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
- mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
- i7core_dev->socket);
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
@@ -2214,6 +2219,8 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
fail0:
kfree(mci->ctl_name);
+
+fail1:
edac_mc_free(mci);
i7core_dev->mci = NULL;
return rc;
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 4395c84cdcbf..df28b65358d2 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -45,6 +45,8 @@
#include "edac_module.h"
#include "pnd2_edac.h"
+#define EDAC_MOD_STR "pnd2_edac"
+
#define APL_NUM_CHANNELS 4
#define DNV_NUM_CHANNELS 2
#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
@@ -1355,7 +1357,7 @@ static int pnd2_register_mci(struct mem_ctl_info **ppmci)
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
- mci->mod_name = "pnd2_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = ops->name;
mci->ctl_name = "Pondicherry2";
@@ -1547,10 +1549,15 @@ MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index dc0591654011..f34430f99fd8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -36,7 +36,7 @@ static LIST_HEAD(sbridge_edac_list);
* Alter this version for the module when modifications are made
*/
#define SBRIDGE_REVISION " Ver: 1.1.2 "
-#define EDAC_MOD_STR "sbridge_edac"
+#define EDAC_MOD_STR "sb_edac"
/*
* Debug macros
@@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
/* Memory controller */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
@@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
/* Optional, mode 2HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
@@ -1318,9 +1318,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
int cur_reg_start;
int mc;
int channel;
- int way;
int participants[KNL_MAX_CHANNELS];
- int participant_count = 0;
for (i = 0; i < KNL_MAX_CHANNELS; i++)
mc_sizes[i] = 0;
@@ -1495,21 +1493,14 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
* this channel mapped to the given target?
*/
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
- for (way = 0; way < intrlv_ways; way++) {
- int target;
- int cha;
-
- if (KNL_MOD3(dram_rule))
- target = way;
- else
- target = 0x7 & sad_pkg(
- pvt->info.interleave_pkg, interleave_reg, way);
+ int target;
+ int cha;
+ for (target = 0; target < KNL_MAX_CHANNELS; target++) {
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
&& !participants[channel]) {
- participant_count++;
participants[channel] = 1;
break;
}
@@ -1517,10 +1508,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
}
}
- if (participant_count != intrlv_ways)
- edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
- participant_count, intrlv_ways);
-
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
mc = knl_channel_mc(channel);
if (participants[channel]) {
@@ -2291,6 +2278,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
next_imc:
sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
if (!sbridge_dev) {
+ /* If HA1 wasn't found, don't create a second EDAC memory controller */
+ if (dev_descr->dom == IMC1 && devno != 1) {
+ edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return 0;
+ }
if (dev_descr->dom == SOCK)
goto out_imc;
@@ -2491,6 +2485,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev;
+ break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev;
@@ -3155,7 +3150,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "sb_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
@@ -3287,6 +3282,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
break;
}
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
/* Get dimm basic config and the memory layout */
rc = get_dimm_config(mci);
if (rc < 0) {
@@ -3402,10 +3402,15 @@ static void sbridge_remove(void)
static int __init sbridge_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 16dea97568a1..912c4930c9ef 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -31,6 +31,8 @@
#include "edac_module.h"
+#define EDAC_MOD_STR "skx_edac"
+
/*
* Debug macros
*/
@@ -65,6 +67,7 @@ static u64 skx_tolm, skx_tohm;
struct skx_dev {
struct list_head list;
u8 bus[4];
+ int seg;
struct pci_dev *sad_all;
struct pci_dev *util_all;
u32 mcroute;
@@ -110,12 +113,12 @@ struct decoded_addr {
int bank_group;
};
-static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
list_for_each_entry(d, &skx_edac_list, list) {
- if (d->bus[idx] == bus)
+ if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
return d;
}
@@ -172,6 +175,7 @@ static int get_all_bus_mappings(void)
pci_dev_put(pdev);
return -ENOMEM;
}
+ d->seg = pci_domain_nr(pdev->bus);
pci_read_config_dword(pdev, 0xCC, &reg);
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
@@ -207,7 +211,7 @@ static int get_all_munits(const struct munit *m)
if (i == NUM_IMC)
goto fail;
}
- d = get_skx_dev(pdev->bus->number, m->busidx);
+ d = get_skx_dev(pdev->bus, m->busidx);
if (!d)
goto fail;
@@ -299,7 +303,7 @@ static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
-#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks")
#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
@@ -360,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
- banks, ranks, rows, cols);
+ banks, 1 << ranks, rows, cols);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
@@ -464,12 +468,16 @@ static int skx_register_mci(struct skx_imc *imc)
pvt = mci->pvt_info;
pvt->imc = imc;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
- imc->node_id, imc->lmc);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", imc->node_id, imc->lmc);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
mci->mtype_cap = MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "skx_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(imc->chan[0].cdev);
mci->ctl_page_to_phys = NULL;
@@ -491,6 +499,7 @@ static int skx_register_mci(struct skx_imc *imc)
fail:
kfree(mci->ctl_name);
+fail0:
edac_mc_free(mci);
imc->mci = NULL;
return rc;
@@ -1039,12 +1048,17 @@ static int __init skx_init(void)
{
const struct x86_cpu_id *id;
const struct munit *m;
+ const char *owner;
int rc = 0, i;
u8 mc = 0, src_id, node_id;
struct skx_dev *d;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index f35d87519a3e..4803c6468bab 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -639,27 +639,6 @@ err_free:
return ret;
}
-#ifdef CONFIG_PM
-static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int thunderx_lmc_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- return 0;
-}
-#endif
-
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
{ 0, },
@@ -834,10 +813,6 @@ static struct pci_driver thunderx_lmc_driver = {
.name = "thunderx_lmc_edac",
.probe = thunderx_lmc_probe,
.remove = thunderx_lmc_remove,
-#ifdef CONFIG_PM
- .suspend = thunderx_lmc_suspend,
- .resume = thunderx_lmc_resume,
-#endif
.id_table = thunderx_lmc_pci_tbl,
};
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4ac454ae54d7..83876a1c8d98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2094,6 +2094,11 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
+ err = -EINVAL;
+ goto err;
+ }
+
syncobj = drm_syncobj_find(file, fence.handle);
if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n");
@@ -2101,6 +2106,9 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+ ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
+
fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e2410eb5d96e..ad524cb0f6fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -832,10 +832,14 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
}
}
-struct sgt_dma {
+static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-};
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+ return (struct sgt_dma) { sg, addr, addr + sg->length };
+}
struct gen8_insert_pte {
u16 pml4e;
@@ -916,11 +920,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
@@ -933,11 +933,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
@@ -1632,13 +1628,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter;
+ struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- iter.sg = vma->pages->sgl;
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e84fee3ec4f3..184340d486c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -721,7 +721,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 3bbad22b3748..d6b1c509ae01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -224,7 +224,7 @@ out:
return ret;
}
-static struct dma_fence_ops vmw_fence_ops = {
+static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d65431417b17..7ad017690e3a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -552,6 +552,7 @@ config SENSORS_G762
config SENSORS_GPIO_FAN
tristate "GPIO fan"
+ depends on OF_GPIO
depends on GPIOLIB || COMPILE_TEST
depends on THERMAL || THERMAL=n
help
@@ -862,6 +863,20 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX6621
+ tristate "Maxim MAX6621 sensor chip"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the MAX6621 sensor chip.
+ The MAX6621 is a PECI-to-I2C translator that provides an efficient,
+ low-cost solution for PECI-to-SMBus/I2C protocol conversion.
+ It allows the PECI-compliant host to read the temperature directly
+ from up to four PECI-enabled CPUs.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6621.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 23e195a5a2f3..0fe489fab663 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 4875e99b59c9..6d34c05a4f83 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -579,7 +579,6 @@ static ssize_t show_pwm_enable(struct device *dev,
mutex_unlock(&data->update_lock);
val = config | (altbit << 3);
- newval = 0;
if (val == 3 || val >= 10)
newval = 255;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 69b97d45e3cb..63a95e23ca81 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,19 +7,19 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/sysfs.h>
#include <linux/regmap.h>
+#include <linux/sysfs.h>
#include <linux/thermal.h>
/* ASPEED PWM & FAN Tach Register Definition */
@@ -161,7 +161,7 @@
* 11: reserved.
*/
#define M_TACH_MODE 0x02 /* 10b */
-#define M_TACH_UNIT 0x00c0
+#define M_TACH_UNIT 0x0210
#define INIT_FAN_CTRL 0xFF
/* How long we sleep in us while waiting for an RPM result. */
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 9c355b9d31c5..5c9a52599cf6 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -29,21 +29,24 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
-#include <linux/gpio.h>
-#include <linux/gpio-fan.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/thermal.h>
+struct gpio_fan_speed {
+ int rpm;
+ int ctrl_val;
+};
+
struct gpio_fan_data {
- struct platform_device *pdev;
+ struct device *dev;
struct device *hwmon_dev;
/* Cooling device if any */
struct thermal_cooling_device *cdev;
struct mutex lock; /* lock GPIOs operations. */
- int num_ctrl;
- unsigned *ctrl;
+ int num_gpios;
+ struct gpio_desc **gpios;
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
@@ -51,7 +54,7 @@ struct gpio_fan_data {
int resume_speed;
#endif
bool pwm_enable;
- struct gpio_fan_alarm *alarm;
+ struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
};
@@ -64,8 +67,8 @@ static void fan_alarm_notify(struct work_struct *ws)
struct gpio_fan_data *fan_data =
container_of(ws, struct gpio_fan_data, alarm_work);
- sysfs_notify(&fan_data->pdev->dev.kobj, NULL, "fan1_alarm");
- kobject_uevent(&fan_data->pdev->dev.kobj, KOBJ_CHANGE);
+ sysfs_notify(&fan_data->dev->kobj, NULL, "fan1_alarm");
+ kobject_uevent(&fan_data->dev->kobj, KOBJ_CHANGE);
}
static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
@@ -81,47 +84,30 @@ static ssize_t fan1_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- struct gpio_fan_alarm *alarm = fan_data->alarm;
- int value = gpio_get_value_cansleep(alarm->gpio);
- if (alarm->active_low)
- value = !value;
-
- return sprintf(buf, "%d\n", value);
+ return sprintf(buf, "%d\n",
+ gpiod_get_value_cansleep(fan_data->alarm_gpio));
}
static DEVICE_ATTR_RO(fan1_alarm);
-static int fan_alarm_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_alarm *alarm)
+static int fan_alarm_init(struct gpio_fan_data *fan_data)
{
- int err;
int alarm_irq;
- struct platform_device *pdev = fan_data->pdev;
-
- fan_data->alarm = alarm;
-
- err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm");
- if (err)
- return err;
-
- err = gpio_direction_input(alarm->gpio);
- if (err)
- return err;
+ struct device *dev = fan_data->dev;
/*
* If the alarm GPIO doesn't support interrupts, just leave
* without initializing the fail notification support.
*/
- alarm_irq = gpio_to_irq(alarm->gpio);
- if (alarm_irq < 0)
+ alarm_irq = gpiod_to_irq(fan_data->alarm_gpio);
+ if (alarm_irq <= 0)
return 0;
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
- err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler,
- IRQF_SHARED, "GPIO fan alarm", fan_data);
- return err;
+ return devm_request_irq(dev, alarm_irq, fan_alarm_irq_handler,
+ IRQF_SHARED, "GPIO fan alarm", fan_data);
}
/*
@@ -133,8 +119,9 @@ static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val)
{
int i;
- for (i = 0; i < fan_data->num_ctrl; i++)
- gpio_set_value_cansleep(fan_data->ctrl[i], (ctrl_val >> i) & 1);
+ for (i = 0; i < fan_data->num_gpios; i++)
+ gpiod_set_value_cansleep(fan_data->gpios[i],
+ (ctrl_val >> i) & 1);
}
static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
@@ -142,10 +129,10 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
int i;
int ctrl_val = 0;
- for (i = 0; i < fan_data->num_ctrl; i++) {
+ for (i = 0; i < fan_data->num_gpios; i++) {
int value;
- value = gpio_get_value_cansleep(fan_data->ctrl[i]);
+ value = gpiod_get_value_cansleep(fan_data->gpios[i]);
ctrl_val |= (value << i);
}
return ctrl_val;
@@ -170,7 +157,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
if (fan_data->speed[i].ctrl_val == ctrl_val)
return i;
- dev_warn(&fan_data->pdev->dev,
+ dev_warn(fan_data->dev,
"missing speed array entry for GPIO value 0x%x\n", ctrl_val);
return -ENODEV;
@@ -328,9 +315,9 @@ static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
- if (index == 0 && !data->alarm)
+ if (index == 0 && !data->alarm_gpio)
return 0;
- if (index > 0 && !data->ctrl)
+ if (index > 0 && !data->gpios)
return 0;
return attr->mode;
@@ -358,30 +345,25 @@ static const struct attribute_group *gpio_fan_groups[] = {
NULL
};
-static int fan_ctrl_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_platform_data *pdata)
+static int fan_ctrl_init(struct gpio_fan_data *fan_data)
{
- struct platform_device *pdev = fan_data->pdev;
- int num_ctrl = pdata->num_ctrl;
- unsigned *ctrl = pdata->ctrl;
+ int num_gpios = fan_data->num_gpios;
+ struct gpio_desc **gpios = fan_data->gpios;
int i, err;
- for (i = 0; i < num_ctrl; i++) {
- err = devm_gpio_request(&pdev->dev, ctrl[i],
- "GPIO fan control");
- if (err)
- return err;
-
- err = gpio_direction_output(ctrl[i],
- gpio_get_value_cansleep(ctrl[i]));
+ for (i = 0; i < num_gpios; i++) {
+ /*
+ * The GPIO descriptors were retrieved with GPIOD_ASIS so here
+ * we set the GPIO into output mode, carefully preserving its current
+ * value so that the default fan speed does not change unexpectedly.
+ */
+ err = gpiod_direction_output(gpios[i],
+ gpiod_get_value_cansleep(gpios[i]));
if (err)
return err;
}
- fan_data->num_ctrl = num_ctrl;
- fan_data->ctrl = ctrl;
- fan_data->num_speed = pdata->num_speed;
- fan_data->speed = pdata->speed;
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
if (fan_data->speed_index < 0)
@@ -432,67 +414,47 @@ static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
.set_cur_state = gpio_fan_set_cur_state,
};
-#ifdef CONFIG_OF_GPIO
/*
* Translate OpenFirmware node properties into platform_data
*/
-static int gpio_fan_get_of_pdata(struct device *dev,
- struct gpio_fan_platform_data *pdata)
+static int gpio_fan_get_of_data(struct gpio_fan_data *fan_data)
{
- struct device_node *node;
struct gpio_fan_speed *speed;
- unsigned *ctrl;
+ struct device *dev = fan_data->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_desc **gpios;
unsigned i;
u32 u;
struct property *prop;
const __be32 *p;
- node = dev->of_node;
-
/* Alarm GPIO if one exists */
- if (of_gpio_named_count(node, "alarm-gpios") > 0) {
- struct gpio_fan_alarm *alarm;
- int val;
- enum of_gpio_flags flags;
-
- alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm),
- GFP_KERNEL);
- if (!alarm)
- return -ENOMEM;
-
- val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags);
- if (val < 0)
- return val;
- alarm->gpio = val;
- alarm->active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- pdata->alarm = alarm;
- }
+ fan_data->alarm_gpio = devm_gpiod_get_optional(dev, "alarm", GPIOD_IN);
+ if (IS_ERR(fan_data->alarm_gpio))
+ return PTR_ERR(fan_data->alarm_gpio);
/* Fill GPIO pin array */
- pdata->num_ctrl = of_gpio_count(node);
- if (pdata->num_ctrl <= 0) {
- if (pdata->alarm)
+ fan_data->num_gpios = gpiod_count(dev, NULL);
+ if (fan_data->num_gpios <= 0) {
+ if (fan_data->alarm_gpio)
return 0;
dev_err(dev, "DT properties empty / missing");
return -ENODEV;
}
- ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned),
- GFP_KERNEL);
- if (!ctrl)
+ gpios = devm_kzalloc(dev,
+ fan_data->num_gpios * sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!gpios)
return -ENOMEM;
- for (i = 0; i < pdata->num_ctrl; i++) {
- int val;
-
- val = of_get_gpio(node, i);
- if (val < 0)
- return val;
- ctrl[i] = val;
+ for (i = 0; i < fan_data->num_gpios; i++) {
+ gpios[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(gpios[i]))
+ return PTR_ERR(gpios[i]);
}
- pdata->ctrl = ctrl;
+ fan_data->gpios = gpios;
/* Get number of RPM/ctrl_val pairs in speed map */
- prop = of_find_property(node, "gpio-fan,speed-map", &i);
+ prop = of_find_property(np, "gpio-fan,speed-map", &i);
if (!prop) {
dev_err(dev, "gpio-fan,speed-map DT property missing");
return -ENODEV;
@@ -502,7 +464,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries");
return -ENODEV;
}
- pdata->num_speed = i / 2;
+ fan_data->num_speed = i / 2;
/*
* Populate speed map
@@ -510,12 +472,12 @@ static int gpio_fan_get_of_pdata(struct device *dev,
* this needs splitting into pairs to create gpio_fan_speed structs
*/
speed = devm_kzalloc(dev,
- pdata->num_speed * sizeof(struct gpio_fan_speed),
+ fan_data->num_speed * sizeof(struct gpio_fan_speed),
GFP_KERNEL);
if (!speed)
return -ENOMEM;
p = NULL;
- for (i = 0; i < pdata->num_speed; i++) {
+ for (i = 0; i < fan_data->num_speed; i++) {
p = of_prop_next_u32(prop, p, &u);
if (!p)
return -ENODEV;
@@ -525,7 +487,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
return -ENODEV;
speed[i].ctrl_val = u;
}
- pdata->speed = speed;
+ fan_data->speed = speed;
return 0;
}
@@ -535,76 +497,58 @@ static const struct of_device_id of_gpio_fan_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
-#endif /* CONFIG_OF_GPIO */
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
- struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
+ fan_data = devm_kzalloc(dev, sizeof(struct gpio_fan_data),
GFP_KERNEL);
if (!fan_data)
return -ENOMEM;
-#ifdef CONFIG_OF_GPIO
- if (!pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct gpio_fan_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- err = gpio_fan_get_of_pdata(&pdev->dev, pdata);
- if (err)
- return err;
- }
-#else /* CONFIG_OF_GPIO */
- if (!pdata)
- return -EINVAL;
-#endif /* CONFIG_OF_GPIO */
+ fan_data->dev = dev;
+ err = gpio_fan_get_of_data(fan_data);
+ if (err)
+ return err;
- fan_data->pdev = pdev;
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
/* Configure alarm GPIO if available. */
- if (pdata->alarm) {
- err = fan_alarm_init(fan_data, pdata->alarm);
+ if (fan_data->alarm_gpio) {
+ err = fan_alarm_init(fan_data);
if (err)
return err;
}
/* Configure control GPIOs if available. */
- if (pdata->ctrl && pdata->num_ctrl > 0) {
- if (!pdata->speed || pdata->num_speed <= 1)
+ if (fan_data->gpios && fan_data->num_gpios > 0) {
+ if (!fan_data->speed || fan_data->num_speed <= 1)
return -EINVAL;
- err = fan_ctrl_init(fan_data, pdata);
+ err = fan_ctrl_init(fan_data);
if (err)
return err;
}
/* Make this driver part of hwmon class. */
fan_data->hwmon_dev =
- devm_hwmon_device_register_with_groups(&pdev->dev,
+ devm_hwmon_device_register_with_groups(dev,
"gpio_fan", fan_data,
gpio_fan_groups);
if (IS_ERR(fan_data->hwmon_dev))
return PTR_ERR(fan_data->hwmon_dev);
-#ifdef CONFIG_OF_GPIO
+
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
+ fan_data->cdev = thermal_of_cooling_device_register(np,
"gpio-fan",
fan_data,
&gpio_fan_cool_ops);
-#else /* CONFIG_OF_GPIO */
- /* Optional cooling device register for non Device tree platforms */
- fan_data->cdev = thermal_cooling_device_register("gpio-fan", fan_data,
- &gpio_fan_cool_ops);
-#endif /* CONFIG_OF_GPIO */
- dev_info(&pdev->dev, "GPIO fan initialized\n");
+ dev_info(dev, "GPIO fan initialized\n");
return 0;
}
@@ -616,7 +560,7 @@ static int gpio_fan_remove(struct platform_device *pdev)
if (!IS_ERR(fan_data->cdev))
thermal_cooling_device_unregister(fan_data->cdev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, 0);
return 0;
@@ -632,7 +576,7 @@ static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl) {
+ if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
set_fan_speed(fan_data, 0);
}
@@ -644,7 +588,7 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, fan_data->resume_speed);
return 0;
@@ -663,9 +607,7 @@ static struct platform_driver gpio_fan_driver = {
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
-#ifdef CONFIG_OF_GPIO
.of_match_table = of_match_ptr(of_gpio_fan_match),
-#endif
},
};
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index ce3b91f22e30..46a54ed23410 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);
+#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#endif
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
*/
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
- int offset, u32 *val)
+/* F17h M01h Access through SMN */
+#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+
+struct k10temp_data {
+ struct pci_dev *pdev;
+ void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
+ int temp_offset;
+};
+
+struct tctl_offset {
+ u8 model;
+ char const *id;
+ int offset;
+};
+
+static const struct tctl_offset tctl_offset_table[] = {
+ { 0x17, "AMD Ryzen 7 1600X", 20000 },
+ { 0x17, "AMD Ryzen 7 1700X", 20000 },
+ { 0x17, "AMD Ryzen 7 1800X", 20000 },
+ { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+};
+
+static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
+}
+
+static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
+ unsigned int base, int offset, u32 *val)
{
mutex_lock(&nb_smu_ind_mutex);
pci_bus_write_config_dword(pdev->bus, devfn,
- 0xb8, offset);
+ base, offset);
pci_bus_read_config_dword(pdev->bus, devfn,
- 0xbc, val);
+ base + 4, val);
mutex_unlock(&nb_smu_ind_mutex);
}
+static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
+static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
static ssize_t temp1_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct k10temp_data *data = dev_get_drvdata(dev);
u32 regval;
- struct pci_dev *pdev = dev_get_drvdata(dev);
-
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
- amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
- F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
- &regval);
- } else {
- pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
- }
- return sprintf(buf, "%u\n", (regval >> 21) * 125);
+ unsigned int temp;
+
+ data->read_tempreg(data->pdev, &regval);
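+ /*
+ * The current temperature is reported in bits 31:21 of the
+ * register in units of 0.125 degrees C; scale it to millidegrees
+ * and subtract the per-model Tctl offset, if any.
+ */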
+ temp = (regval >> 21) * 125;
+ temp -= data->temp_offset;
+
+ return sprintf(buf, "%u\n", temp);
}
static ssize_t temp1_max_show(struct device *dev,
@@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
int show_hyst = attr->index;
u32 regval;
int value;
- pci_read_config_dword(dev_get_drvdata(dev),
+ pci_read_config_dword(data->pdev,
REG_HARDWARE_THERMAL_CONTROL, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
@@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct pci_dev *pdev = dev_get_drvdata(dev);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ struct pci_dev *pdev = data->pdev;
if (index >= 2) {
u32 reg_caps, reg_htc;
@@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev,
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
+ struct k10temp_data *data;
struct device *hwmon_dev;
+ int i;
if (unreliable) {
if (!force) {
@@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev,
"unreliable CPU thermal sensor; check erratum 319\n");
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
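+ /*
+ * Family 15h models 60h/70h and family 17h do not expose the
+ * reported temperature register directly in PCI config space;
+ * read it through the indirect NB/SMN index-data pair instead.
+ */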
+ if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+ boot_cpu_data.x86_model == 0x70))
+ data->read_tempreg = read_tempreg_nb_f15;
+ else if (boot_cpu_data.x86 == 0x17)
+ data->read_tempreg = read_tempreg_nb_f17;
+ else
+ data->read_tempreg = read_tempreg_pci;
+
+ for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
+ const struct tctl_offset *entry = &tctl_offset_table[i];
+
+ if (boot_cpu_data.x86 == entry->model &&
+ strstr(boot_cpu_data.x86_model_id, entry->id)) {
+ data->temp_offset = entry->offset;
+ break;
+ }
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
k10temp_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index a18278938494..76d966932941 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -303,10 +303,20 @@ static const struct i2c_device_id max1619_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
+#ifdef CONFIG_OF
+static const struct of_device_id max1619_of_match[] = {
+ { .compatible = "maxim,max1619", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, max1619_of_match);
+#endif
+
static struct i2c_driver max1619_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max1619",
+ .of_match_table = of_match_ptr(max1619_of_match),
},
.probe = max1619_probe,
.id_table = max1619_id,
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
new file mode 100644
index 000000000000..35555f0eefb9
--- /dev/null
+++ b/drivers/hwmon/max6621.c
@@ -0,0 +1,593 @@
+/*
+ * Hardware monitoring driver for Maxim MAX6621
+ *
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define MAX6621_DRV_NAME "max6621"
+#define MAX6621_TEMP_INPUT_REG_NUM 9
+#define MAX6621_TEMP_INPUT_MIN -127000
+#define MAX6621_TEMP_INPUT_MAX 128000
+#define MAX6621_TEMP_ALERT_CHAN_SHIFT 1
+
+#define MAX6621_TEMP_S0D0_REG 0x00
+#define MAX6621_TEMP_S0D1_REG 0x01
+#define MAX6621_TEMP_S1D0_REG 0x02
+#define MAX6621_TEMP_S1D1_REG 0x03
+#define MAX6621_TEMP_S2D0_REG 0x04
+#define MAX6621_TEMP_S2D1_REG 0x05
+#define MAX6621_TEMP_S3D0_REG 0x06
+#define MAX6621_TEMP_S3D1_REG 0x07
+#define MAX6621_TEMP_MAX_REG 0x08
+#define MAX6621_TEMP_MAX_ADDR_REG 0x0a
+#define MAX6621_TEMP_ALERT_CAUSE_REG 0x0b
+#define MAX6621_CONFIG0_REG 0x0c
+#define MAX6621_CONFIG1_REG 0x0d
+#define MAX6621_CONFIG2_REG 0x0e
+#define MAX6621_CONFIG3_REG 0x0f
+#define MAX6621_TEMP_S0_ALERT_REG 0x10
+#define MAX6621_TEMP_S1_ALERT_REG 0x11
+#define MAX6621_TEMP_S2_ALERT_REG 0x12
+#define MAX6621_TEMP_S3_ALERT_REG 0x13
+#define MAX6621_CLEAR_ALERT_REG 0x15
+#define MAX6621_REG_MAX (MAX6621_CLEAR_ALERT_REG + 1)
+#define MAX6621_REG_TEMP_SHIFT 0x06
+
+#define MAX6621_ENABLE_TEMP_ALERTS_BIT 4
+#define MAX6621_ENABLE_I2C_CRC_BIT 5
+#define MAX6621_ENABLE_ALTERNATE_DATA 6
+#define MAX6621_ENABLE_LOCKUP_TO 7
+#define MAX6621_ENABLE_S0D0_BIT 8
+#define MAX6621_ENABLE_S3D1_BIT 15
+#define MAX6621_ENABLE_TEMP_ALL GENMASK(MAX6621_ENABLE_S3D1_BIT, \
+ MAX6621_ENABLE_S0D0_BIT)
+#define MAX6621_POLL_DELAY_MASK 0x5
+#define MAX6621_CONFIG0_INIT (MAX6621_ENABLE_TEMP_ALL | \
+ BIT(MAX6621_ENABLE_LOCKUP_TO) | \
+ BIT(MAX6621_ENABLE_I2C_CRC_BIT) | \
+ MAX6621_POLL_DELAY_MASK)
+#define MAX6621_PECI_BIT_TIME 0x2
+#define MAX6621_PECI_RETRY_NUM 0x3
+#define MAX6621_CONFIG1_INIT ((MAX6621_PECI_BIT_TIME << 8) | \
+ MAX6621_PECI_RETRY_NUM)
+
+/* Error codes */
+#define MAX6621_TRAN_FAILED 0x8100 /*
+ * PECI transaction failed for more
+ * than the configured number of
+ * consecutive retries.
+ */
+#define MAX6621_POOL_DIS 0x8101 /*
+ * Polling disabled for requested
+ * socket/domain.
+ */
+#define MAX6621_POOL_UNCOMPLETE 0x8102 /*
+ * First poll not yet completed for
+ * requested socket/domain (on
+ * startup).
+ */
+#define MAX6621_SD_DIS 0x8103 /*
+ * Read maximum temperature requested,
+ * but no sockets/domains enabled or
+ * all enabled sockets/domains have
+ * errors; or read maximum temperature
+ * address requested, but read maximum
+ * temperature was not called.
+ */
+#define MAX6621_ALERT_DIS 0x8104 /*
+ * Get alert socket/domain requested,
+ * but no alert active.
+ */
+#define MAX6621_PECI_ERR_MIN 0x8000 /* Intel spec PECI error min value. */
+#define MAX6621_PECI_ERR_MAX 0x80ff /* Intel spec PECI error max value. */
+
+static const u32 max6621_temp_regs[] = {
+ MAX6621_TEMP_MAX_REG, MAX6621_TEMP_S0D0_REG, MAX6621_TEMP_S1D0_REG,
+ MAX6621_TEMP_S2D0_REG, MAX6621_TEMP_S3D0_REG, MAX6621_TEMP_S0D1_REG,
+ MAX6621_TEMP_S1D1_REG, MAX6621_TEMP_S2D1_REG, MAX6621_TEMP_S3D1_REG,
+};
+
+static const char *const max6621_temp_labels[] = {
+ "maximum",
+ "socket0_0",
+ "socket1_0",
+ "socket2_0",
+ "socket3_0",
+ "socket0_1",
+ "socket1_1",
+ "socket2_1",
+ "socket3_1",
+};
+
+static const int max6621_temp_alert_chan2reg[] = {
+ MAX6621_TEMP_S0_ALERT_REG,
+ MAX6621_TEMP_S1_ALERT_REG,
+ MAX6621_TEMP_S2_ALERT_REG,
+ MAX6621_TEMP_S3_ALERT_REG,
+};
+
+/**
+ * struct max6621_data - private data:
+ *
+ * @client: I2C client;
+ * @regmap: register map handle;
+ * @input_chan2reg: mapping from channel to register;
+ */
+struct max6621_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int input_chan2reg[MAX6621_TEMP_INPUT_REG_NUM + 1];
+};
+
+static long max6621_temp_mc2reg(long val)
+{
+ return (val / 1000L) << MAX6621_REG_TEMP_SHIFT;
+}
+
+static umode_t
+max6621_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ /* Skip channels which are not physically connected. */
+ if (((struct max6621_data *)data)->input_chan2reg[channel] < 0)
+ return 0;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_offset:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int max6621_verify_reg_data(struct device *dev, int regval)
+{
+ if (regval >= MAX6621_PECI_ERR_MIN &&
+ regval <= MAX6621_PECI_ERR_MAX) {
+ dev_dbg(dev, "PECI error code - err 0x%04x.\n",
+ regval);
+
+ return -EIO;
+ }
+
+ switch (regval) {
+ case MAX6621_TRAN_FAILED:
+ dev_dbg(dev, "PECI transaction failed - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_POOL_DIS:
+ dev_dbg(dev, "Polling disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_POOL_UNCOMPLETE:
+ dev_dbg(dev, "First poll not completed on startup - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_SD_DIS:
+ dev_dbg(dev, "Resource is disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_ALERT_DIS:
+ dev_dbg(dev, "No alert active - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+}
+
+static int
+max6621_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 regval;
+ int reg;
+ s8 temp;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = data->input_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ /*
+ * Bit MAX6621_REG_TEMP_SHIFT represents a 1 degree step.
+ * The temperature is given in two's complement, and 8
+ * bits are used for the register conversion.
+ */
+ temp = (regval >> MAX6621_REG_TEMP_SHIFT);
+ *val = temp * 1000L;
+
+ break;
+ case hwmon_temp_offset:
+ ret = regmap_read(data->regmap, MAX6621_CONFIG2_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = (regval >> MAX6621_REG_TEMP_SHIFT) *
+ 1000L;
+
+ break;
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = regval * 1000L;
+
+ break;
+ case hwmon_temp_crit_alarm:
+ /*
+ * Set val to zero to cover the case where reading
+ * MAX6621_TEMP_ALERT_CAUSE_REG returns, for example,
+ * MAX6621_ALERT_DIS. The read will return an error,
+ * but in that case the alarm should be reported as 0.
+ */
+ *val = 0;
+ ret = regmap_read(data->regmap,
+ MAX6621_TEMP_ALERT_CAUSE_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret) {
+ /* Do not report error if alert is disabled. */
+ if (regval == MAX6621_ALERT_DIS)
+ return 0;
+ else
+ return ret;
+ }
+
+ /*
+ * Clear the alert automatically, using send-byte
+ * smbus protocol for clearing alert.
+ */
+ if (regval) {
+ ret = i2c_smbus_write_byte(data->client,
+ MAX6621_CLEAR_ALERT_REG);
+ if (ret)
+ return ret;
+ }
+
+ *val = !!regval;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+max6621_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 reg;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_offset:
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = max6621_temp_mc2reg(val);
+
+ return regmap_write(data->regmap,
+ MAX6621_CONFIG2_REG, val);
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = val / 1000L;
+
+ return regmap_write(data->regmap, reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+max6621_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = max6621_temp_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool max6621_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_MAX_ADDR_REG:
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default max6621_regmap_default[] = {
+ { MAX6621_CONFIG0_REG, MAX6621_CONFIG0_INIT },
+ { MAX6621_CONFIG1_REG, MAX6621_CONFIG1_INIT },
+};
+
+static const struct regmap_config max6621_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX6621_REG_MAX,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = max6621_writeable_reg,
+ .readable_reg = max6621_readable_reg,
+ .volatile_reg = max6621_volatile_reg,
+ .reg_defaults = max6621_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
+};
+
+static u32 max6621_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_chip = {
+ .type = hwmon_chip,
+ .config = max6621_chip_config,
+};
+
+static const u32 max6621_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_temp = {
+ .type = hwmon_temp,
+ .config = max6621_temp_config,
+};
+
+static const struct hwmon_channel_info *max6621_info[] = {
+ &max6621_chip,
+ &max6621_temp,
+ NULL
+};
+
+static const struct hwmon_ops max6621_hwmon_ops = {
+ .read = max6621_read,
+ .write = max6621_write,
+ .read_string = max6621_read_string,
+ .is_visible = max6621_is_visible,
+};
+
+static const struct hwmon_chip_info max6621_chip_info = {
+ .ops = &max6621_hwmon_ops,
+ .info = max6621_info,
+};
+
+static int max6621_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct max6621_data *data;
+ struct device *hwmon_dev;
+ int i;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &max6621_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ /* Set CONFIG0 register masking temperature alerts and PEC. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG0_REG,
+ MAX6621_CONFIG0_INIT);
+ if (ret)
+ return ret;
+
+ /* Set CONFIG1 register for PEC access retry number. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG1_REG,
+ MAX6621_CONFIG1_INIT);
+ if (ret)
+ return ret;
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(data->regmap);
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ return ret;
+
+ /* Verify which temperature input registers are enabled. */
+ for (i = 0; i < MAX6621_TEMP_INPUT_REG_NUM; i++) {
+ ret = i2c_smbus_read_word_data(client, max6621_temp_regs[i]);
+ if (ret < 0)
+ return ret;
+ ret = max6621_verify_reg_data(dev, ret);
+ if (ret) {
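+ /* Mark the channel as not connected; is_visible() hides it. */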
+ data->input_chan2reg[i] = -1;
+ continue;
+ }
+
+ data->input_chan2reg[i] = max6621_temp_regs[i];
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max6621_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max6621_id[] = {
+ { MAX6621_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6621_id);
+
+static const struct of_device_id max6621_of_match[] = {
+ { .compatible = "maxim,max6621" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max6621_of_match);
+
+static struct i2c_driver max6621_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = MAX6621_DRV_NAME,
+ .of_match_table = of_match_ptr(max6621_of_match),
+ },
+ .probe = max6621_probe,
+ .id_table = max6621_id,
+};
+
+module_i2c_driver(max6621_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Driver for Maxim MAX6621");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 40019325b517..08479006c7f9 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -114,6 +114,16 @@ config SENSORS_MAX20751
This driver can also be built as a module. If so, the module will
be called max20751.
+config SENSORS_MAX31785
+ tristate "Maxim MAX31785 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX31785.
+
+ This driver can also be built as a module. If so, the module will
+ be called max31785.
+
config SENSORS_MAX34440
tristate "Maxim MAX34440 and compatibles"
default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e9364420a512..ea0e39518c21 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
+obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
new file mode 100644
index 000000000000..9313849d5160
--- /dev/null
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum max31785_regs {
+ MFR_REVISION = 0x9b,
+};
+
+#define MAX31785_NR_PAGES 23
+
+#define MAX31785_FAN_FUNCS \
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+
+#define MAX31785_TEMP_FUNCS \
+ (PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
+
+#define MAX31785_VOUT_FUNCS \
+ (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+
+static const struct pmbus_driver_info max31785_info = {
+ .pages = MAX31785_NR_PAGES,
+
+ /* RPM */
+ .format[PSC_FAN] = direct,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = MAX31785_FAN_FUNCS,
+ .func[1] = MAX31785_FAN_FUNCS,
+ .func[2] = MAX31785_FAN_FUNCS,
+ .func[3] = MAX31785_FAN_FUNCS,
+ .func[4] = MAX31785_FAN_FUNCS,
+ .func[5] = MAX31785_FAN_FUNCS,
+
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[6] = MAX31785_TEMP_FUNCS,
+ .func[7] = MAX31785_TEMP_FUNCS,
+ .func[8] = MAX31785_TEMP_FUNCS,
+ .func[9] = MAX31785_TEMP_FUNCS,
+ .func[10] = MAX31785_TEMP_FUNCS,
+ .func[11] = MAX31785_TEMP_FUNCS,
+ .func[12] = MAX31785_TEMP_FUNCS,
+ .func[13] = MAX31785_TEMP_FUNCS,
+ .func[14] = MAX31785_TEMP_FUNCS,
+ .func[15] = MAX31785_TEMP_FUNCS,
+ .func[16] = MAX31785_TEMP_FUNCS,
+
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[17] = MAX31785_VOUT_FUNCS,
+ .func[18] = MAX31785_VOUT_FUNCS,
+ .func[19] = MAX31785_VOUT_FUNCS,
+ .func[20] = MAX31785_VOUT_FUNCS,
+ .func[21] = MAX31785_VOUT_FUNCS,
+ .func[22] = MAX31785_VOUT_FUNCS,
+};
+
+static int max31785_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ s64 ret;
+
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ *info = max31785_info;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id max31785_id[] = {
+ { "max31785", 0 },
+ { "max31785a", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max31785_id);
+
+static struct i2c_driver max31785_driver = {
+ .driver = {
+ .name = "max31785",
+ },
+ .probe = max31785_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max31785_id,
+};
+
+module_i2c_driver(max31785_driver);
+
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("PMBus driver for the Maxim MAX31785");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 4efa2bd4f6d8..fa613bd209e3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops;
/* Function declarations */
void pmbus_clear_cache(struct i2c_client *client);
-int pmbus_set_page(struct i2c_client *client, u8 page);
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
+int pmbus_set_page(struct i2c_client *client, int page);
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 302f0aef59de..52a58b8b6e1b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_cache);
-int pmbus_set_page(struct i2c_client *client, u8 page)
+int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
int rv = 0;
int newpage;
- if (page != data->currpage) {
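+ /* Negative page numbers mean "no page"; skip the PAGE write. */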
+ if (page >= 0 && page != data->currpage) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (newpage != page)
@@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_write_byte(client, value);
}
@@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
return pmbus_write_byte(client, page, value);
}
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
+ u16 word)
{
int rv;
@@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
return pmbus_write_word_data(client, page, reg, word);
}
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_read_byte_data(client, reg);
}
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index e4d642b673c6..25d28343ba93 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -18,13 +18,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
-#include <linux/platform_data/sht15.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -34,7 +32,8 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/bitrev.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -122,7 +121,8 @@ static const u8 sht15_crc8_table[] = {
/**
* struct sht15_data - device instance specific data
- * @pdata: platform data (gpio's etc).
+ * @sck: clock GPIO line
+ * @data: data GPIO line
* @read_work: bh of interrupt handler.
* @wait_queue: wait queue for getting values from device.
* @val_temp: last temperature value read from device.
@@ -150,7 +150,8 @@ static const u8 sht15_crc8_table[] = {
* @interrupt_handled: flag used to indicate a handler has been scheduled.
*/
struct sht15_data {
- struct sht15_platform_data *pdata;
+ struct gpio_desc *sck;
+ struct gpio_desc *data;
struct work_struct read_work;
wait_queue_head_t wait_queue;
uint16_t val_temp;
@@ -205,16 +206,16 @@ static int sht15_connection_reset(struct sht15_data *data)
{
int i, err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
for (i = 0; i < 9; ++i) {
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return 0;
@@ -227,11 +228,11 @@ static int sht15_connection_reset(struct sht15_data *data)
*/
static inline void sht15_send_bit(struct sht15_data *data, int val)
{
- gpio_set_value(data->pdata->gpio_data, val);
+ gpiod_set_value(data->data, val);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL); /* clock low time */
}
@@ -248,23 +249,23 @@ static int sht15_transmission_start(struct sht15_data *data)
int err;
/* ensure data is high and output */
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 0);
+ gpiod_set_value(data->data, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -292,20 +293,20 @@ static int sht15_wait_for_response(struct sht15_data *data)
{
int err;
- err = gpio_direction_input(data->pdata->gpio_data);
+ err = gpiod_direction_input(data->data);
if (err)
return err;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- if (gpio_get_value(data->pdata->gpio_data)) {
- gpio_set_value(data->pdata->gpio_sck, 0);
+ if (gpiod_get_value(data->data)) {
+ gpiod_set_value(data->sck, 0);
dev_err(data->dev, "Command not acknowledged\n");
err = sht15_connection_reset(data);
if (err)
return err;
return -EIO;
}
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -360,17 +361,17 @@ static int sht15_ack(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 0);
+ err = gpiod_direction_output(data->data, 0);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
- return gpio_direction_input(data->pdata->gpio_data);
+ return gpiod_direction_input(data->data);
}
/**
@@ -383,13 +384,13 @@ static int sht15_end_transmission(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -405,10 +406,10 @@ static u8 sht15_read_byte(struct sht15_data *data)
for (i = 0; i < 8; ++i) {
byte <<= 1;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- byte |= !!gpio_get_value(data->pdata->gpio_data);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ byte |= !!gpiod_get_value(data->data);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return byte;
@@ -428,7 +429,7 @@ static int sht15_send_status(struct sht15_data *data, u8 status)
err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
if (err)
return err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
@@ -528,14 +529,14 @@ static int sht15_measurement(struct sht15_data *data,
if (ret)
return ret;
- ret = gpio_direction_input(data->pdata->gpio_data);
+ ret = gpiod_direction_input(data->data);
if (ret)
return ret;
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
- if (gpio_get_value(data->pdata->gpio_data) == 0) {
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
+ if (gpiod_get_value(data->data) == 0) {
+ disable_irq_nosync(gpiod_to_irq(data->data));
/* Only relevant if the interrupt hasn't occurred. */
if (!atomic_read(&data->interrupt_handled))
schedule_work(&data->read_work);
@@ -547,7 +548,7 @@ static int sht15_measurement(struct sht15_data *data,
data->state = SHT15_READING_NOTHING;
return -EIO;
} else if (ret == 0) { /* timeout occurred */
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
return ret;
@@ -826,15 +827,15 @@ static void sht15_bh_read_data(struct work_struct *work_s)
read_work);
/* Firstly, verify the line is low */
- if (gpio_get_value(data->pdata->gpio_data)) {
+ if (gpiod_get_value(data->data)) {
/*
* If not, then start the interrupt again - care here as could
* have gone low in meantime so verify it hasn't!
*/
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
/* If still not occurred or another handler was scheduled */
- if (gpio_get_value(data->pdata->gpio_data)
+ if (gpiod_get_value(data->data)
|| atomic_read(&data->interrupt_handled))
return;
}
@@ -918,53 +919,12 @@ static const struct of_device_id sht15_dt_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, sht15_dt_match);
-
-/*
- * This function returns NULL if pdev isn't a device instatiated by dt,
- * a pointer to pdata if it could successfully get all information
- * from dt or a negative ERR_PTR() on error.
- */
-static struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct sht15_platform_data *pdata;
-
- /* no device tree device */
- if (!np)
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0);
- if (pdata->gpio_data < 0) {
- if (pdata->gpio_data != -EPROBE_DEFER)
- dev_err(dev, "data-gpios not found\n");
- return ERR_PTR(pdata->gpio_data);
- }
-
- pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0);
- if (pdata->gpio_sck < 0) {
- if (pdata->gpio_sck != -EPROBE_DEFER)
- dev_err(dev, "clk-gpios not found\n");
- return ERR_PTR(pdata->gpio_sck);
- }
-
- return pdata;
-}
-#else
-static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- return NULL;
-}
#endif
static int sht15_probe(struct platform_device *pdev)
{
int ret;
struct sht15_data *data;
- u8 status = 0;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -977,25 +937,6 @@ static int sht15_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
init_waitqueue_head(&data->wait_queue);
- data->pdata = sht15_probe_dt(&pdev->dev);
- if (IS_ERR(data->pdata))
- return PTR_ERR(data->pdata);
- if (data->pdata == NULL) {
- data->pdata = dev_get_platdata(&pdev->dev);
- if (data->pdata == NULL) {
- dev_err(&pdev->dev, "no platform data supplied\n");
- return -EINVAL;
- }
- }
-
- data->supply_uv = data->pdata->supply_mv * 1000;
- if (data->pdata->checksum)
- data->checksumming = true;
- if (data->pdata->no_otp_reload)
- status |= SHT15_STATUS_NO_OTP_RELOAD;
- if (data->pdata->low_resolution)
- status |= SHT15_STATUS_LOW_RESOLUTION;
-
/*
* If a regulator is available,
* query what the supply voltage actually is!
@@ -1030,21 +971,20 @@ static int sht15_probe(struct platform_device *pdev)
}
/* Try requesting the GPIOs */
- ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
- GPIOF_OUT_INIT_LOW, "SHT15 sck");
- if (ret) {
+ data->sck = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ if (IS_ERR(data->sck)) {
+ ret = PTR_ERR(data->sck);
dev_err(&pdev->dev, "clock line GPIO request failed\n");
goto err_release_reg;
}
-
- ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
- "SHT15 data");
- if (ret) {
+ data->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(data->data)) {
+ ret = PTR_ERR(data->data);
dev_err(&pdev->dev, "data line GPIO request failed\n");
goto err_release_reg;
}
- ret = devm_request_irq(&pdev->dev, gpio_to_irq(data->pdata->gpio_data),
+ ret = devm_request_irq(&pdev->dev, gpiod_to_irq(data->data),
sht15_interrupt_fired,
IRQF_TRIGGER_FALLING,
"sht15 data",
@@ -1053,7 +993,7 @@ static int sht15_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get irq for data line\n");
goto err_release_reg;
}
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
goto err_release_reg;
@@ -1061,13 +1001,6 @@ static int sht15_probe(struct platform_device *pdev)
if (ret)
goto err_release_reg;
- /* write status with platform data options */
- if (status) {
- ret = sht15_send_status(data, status);
- if (ret)
- goto err_release_reg;
- }
-
ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
if (ret) {
dev_err(&pdev->dev, "sysfs create failed\n");
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 3f940fb67dc6..7fe152d92350 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -396,7 +396,7 @@ static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->max_alert);
}
static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
@@ -413,7 +413,7 @@ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->min_alert);
}
static ssize_t show_input(struct device *dev, struct device_attribute *attr,
@@ -428,7 +428,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->temp);
}
static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
@@ -436,7 +436,7 @@ static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm);
}
static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
@@ -478,7 +478,7 @@ static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->hyst);
}
static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
@@ -518,7 +518,7 @@ static ssize_t show_therm_trip(struct device *dev,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm_trip);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm_trip);
}
static ssize_t show_max(struct device *dev, struct device_attribute *attr,
@@ -526,7 +526,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_max);
}
static ssize_t set_max(struct device *dev, struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_min);
}
static ssize_t set_min(struct device *dev, struct device_attribute *attr,
@@ -594,7 +594,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ return snprintf(buf, PAGE_SIZE, "%d\n",
stts751_intervals[priv->interval]);
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index dab5c515d5a3..5ba9d9f1daa1 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1676,7 +1676,9 @@ static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ static const int watchdog_minors[] = {
+ WATCHDOG_MINOR, 212, 213, 214, 215
+ };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index e1be61095532..a3cd91f23267 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -91,6 +91,11 @@
#define to_xgene_hwmon_dev(cl) \
container_of(cl, struct xgene_hwmon_dev, mbox_client)
+enum xgene_hwmon_version {
+ XGENE_HWMON_V1 = 0,
+ XGENE_HWMON_V2 = 1,
+};
+
struct slimpro_resp_msg {
u32 msg;
u32 param1;
@@ -609,6 +614,15 @@ static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
+ {"APMC0D29", XGENE_HWMON_V1},
+ {"APMC0D8A", XGENE_HWMON_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
+#endif
+
static int xgene_hwmon_probe(struct platform_device *pdev)
{
struct xgene_hwmon_dev *ctx;
@@ -651,6 +665,15 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
+ const struct acpi_device_id *acpi_id;
+ int version;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id)
+ return -EINVAL;
+
+ version = (int)acpi_id->driver_data;
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
@@ -693,7 +716,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
*/
ctx->comm_base_addr = cppc_ss->base_address;
if (ctx->comm_base_addr) {
- ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
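+ /*
+ * The v2 interface maps the PCC shared memory uncached with
+ * ioremap(), while v1 keeps the cacheable memremap() mapping.
+ */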
+ if (version == XGENE_HWMON_V2)
+ ctx->pcc_comm_addr = (void __force *)ioremap(
+ ctx->comm_base_addr,
+ cppc_ss->length);
+ else
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
cppc_ss->length,
MEMREMAP_WB);
} else {
@@ -761,14 +790,6 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
- {"APMC0D29", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
-#endif
-
static const struct of_device_id xgene_hwmon_of_match[] = {
{.compatible = "apm,xgene-slimpro-hwmon"},
{}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a7355ab3bb22..6ff0be8cbdc9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -867,11 +867,16 @@ static void msf_from_bcd(struct atapi_msf *msf)
int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
{
struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi = &info->devinfo;
+ struct cdrom_device_info *cdi;
unsigned char cmd[BLK_MAX_CDB];
ide_debug_log(IDE_DBG_FUNC, "enter");
+ if (!info)
+ return -EIO;
+
+ cdi = &info->devinfo;
+
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_TEST_UNIT_READY;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 6d6b092e2da9..d6135900da64 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 225025a0940c..b6ccf39c6a7b 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -312,7 +312,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.dev = &client->dev;
rmi_smb->xport.pdata = *pdata;
rmi_smb->xport.pdata.irq = client->irq;
- rmi_smb->xport.proto_name = "smb2";
+ rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
smbus_version = rmi_smb_get_version(rmi_smb);
@@ -322,7 +322,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
- if (smbus_version != 2) {
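+ /* SMBus protocol version 3 is handled the same way as version 2. */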
+ if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 88ea5e1b72ae..abf27578beb1 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -531,6 +531,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
input_set_drvdata(input_dev, ts);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X,
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2ad7b5c69156..ea80ff4cd7f9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,6 +28,7 @@
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
+#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
@@ -86,6 +87,7 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_IDA(mmc_rpmb_ida);
/*
* There is one mmc_blk_data per slot.
@@ -96,6 +98,7 @@ struct mmc_blk_data {
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
+ struct list_head rpmbs;
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
@@ -121,6 +124,32 @@ struct mmc_blk_data {
int area_type;
};
+/* Device type for RPMB character devices */
+static dev_t mmc_rpmb_devt;
+
+/* Bus type for RPMB character devices */
+static struct bus_type mmc_rpmb_bus_type = {
+ .name = "mmc_rpmb",
+};
+
+/**
+ * struct mmc_rpmb_data - special RPMB device type for these areas
+ * @dev: the device for the RPMB area
+ * @chrdev: character device for the RPMB area
+ * @id: unique device ID number
+ * @part_index: partition index (0 for the first partition)
+ * @md: parent MMC block device
+ * @node: list item, so we can put this device on a list
+ */
+struct mmc_rpmb_data {
+ struct device dev;
+ struct cdev chrdev;
+ int id;
+ unsigned int part_index;
+ struct mmc_blk_data *md;
+ struct list_head node;
+};
+
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
@@ -299,6 +328,7 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ struct mmc_rpmb_data *rpmb;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
@@ -437,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {};
struct scatterlist sg;
int err;
- bool is_rpmb = false;
+ unsigned int target_part;
u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
+ /*
+ * RPMB accesses come in from the character device, so we need
+ * to target them explicitly. Otherwise we just target the
+ * partition type of the block device the ioctl() was issued
+ * on.
+ */
+ if (idata->rpmb) {
+ /* Support multiple RPMB partitions */
+ target_part = idata->rpmb->part_index;
+ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
+ } else {
+ target_part = md->part_type;
+ }
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
@@ -488,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mrq.cmd = &cmd;
- err = mmc_blk_part_switch(card, md->part_type);
+ err = mmc_blk_part_switch(card, target_part);
if (err)
return err;
@@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (is_rpmb) {
+ if (idata->rpmb) {
err = mmc_set_blockcount(card, data.blocks,
idata->ic.write_flag & (1 << 31));
if (err)
@@ -538,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ if (idata->rpmb) {
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -554,7 +595,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_cmd __user *ic_ptr)
+ struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data *idata;
struct mmc_blk_ioc_data *idatas[1];
@@ -566,6 +608,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata->rpmb = rpmb;
card = md->queue.card;
if (IS_ERR(card)) {
@@ -581,7 +625,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
idatas[0] = idata;
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -596,7 +641,8 @@ cmd_done:
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_multi_cmd __user *user)
+ struct mmc_ioc_multi_cmd __user *user,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data **idata = NULL;
struct mmc_ioc_cmd __user *cmds = user->cmds;
@@ -627,6 +673,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
num_of_cmds = i;
goto cmd_err;
}
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata[i]->rpmb = rpmb;
}
card = md->queue.card;
@@ -643,7 +691,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -691,7 +740,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_cmd(md,
- (struct mmc_ioc_cmd __user *)arg);
+ (struct mmc_ioc_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
case MMC_IOC_MULTI_CMD:
@@ -702,7 +752,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_multi_cmd(md,
- (struct mmc_ioc_multi_cmd __user *)arg);
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
default:
@@ -1152,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
-int mmc_access_rpmb(struct mmc_queue *mq)
-{
- struct mmc_blk_data *md = mq->blkdata;
- /*
- * If this is a RPMB partition access, return ture
- */
- if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
- return true;
-
- return false;
-}
-
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1174,17 +1213,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
struct mmc_queue_req *mq_rq;
struct mmc_card *card = mq->card;
struct mmc_blk_data *md = mq->blkdata;
- struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
struct mmc_blk_ioc_data **idata;
+ bool rpmb_ioctl;
u8 **ext_csd;
u32 status;
int ret;
int i;
mq_rq = req_to_mmc_queue_req(req);
+ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
@@ -1192,8 +1233,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
/* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, main_md->part_type);
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1534,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr,
- bool *do_data_tag)
+ int disable_multi, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
+ bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- rq_data_dir(req) == WRITE &&
- (md->flags & MMC_BLK_REL_WR);
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1567,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we always set "simple" priority by not setting
+ * MMC_DATA_PRIO.
+ */
/*
* The block layer doesn't support all sector count
@@ -1596,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks);
}
- if (*do_rel_wr)
+ if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
/*
* Data tag is used only during writing meta data to speed
* up write and any subsequent read of this meta data
*/
- *do_data_tag = card->ext_csd.data_tag_unit_size &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
@@ -1634,6 +1690,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1948,7 +2010,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req && !mq->qcnt)
/* claim host only for the first request */
- mmc_get_card(card);
+ mmc_get_card(card, NULL);
ret = mmc_blk_part_switch(card, md->part_type);
if (ret) {
@@ -2011,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if (!mq->qcnt)
- mmc_put_card(card);
+ mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2068,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
spin_lock_init(&md->lock);
INIT_LIST_HEAD(&md->part);
+ INIT_LIST_HEAD(&md->rpmbs);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
@@ -2186,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
return 0;
}
+/**
+ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
+ * @filp: the character device file
+ * @cmd: the ioctl() command
+ * @arg: the argument from userspace
+ *
+ * This essentially just redirects the ioctl()s coming in on the RPMB
+ * character device over to the main block device that spawned it.
+ */
+static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mmc_rpmb_data *rpmb = filp->private_data;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_ioctl_cmd(rpmb->md,
+ (struct mmc_ioc_cmd __user *)arg,
+ rpmb);
+ break;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ rpmb);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
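/*
 * Userspace sketch (not part of this patch): issuing a raw MMC command on the
 * new RPMB character device. The /dev path, the opcode/flags constants and
 * the idea of reading a single result frame are assumptions for illustration;
 * a real RPMB exchange also needs the JEDEC frame layout and, for writes,
 * CMD23 with the reliable-write bit set.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmc/ioctl.h>
 *
 *	#define RPMB_READ_MULTIPLE_BLOCK 18	// CMD18, not exported by UAPI
 *	#define RPMB_RSP_R1_ADTC 0x35		// MMC_RSP_R1 | MMC_CMD_ADTC
 *
 *	static int rpmb_read_result_frame(int fd, uint8_t frame[512])
 *	{
 *		struct mmc_ioc_cmd ic = { 0 };
 *
 *		ic.opcode = RPMB_READ_MULTIPLE_BLOCK;
 *		ic.write_flag = 0;
 *		ic.blksz = 512;
 *		ic.blocks = 1;
 *		ic.flags = RPMB_RSP_R1_ADTC;
 *		mmc_ioc_cmd_set_data(ic, frame);
 *
 *		return ioctl(fd, MMC_IOC_CMD, &ic);	// lands in mmc_rpmb_ioctl()
 *	}
 *
 *	// e.g. int fd = open("/dev/mmcblk0rpmb", O_RDWR);  (path is an assumption)
 */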
+
+#ifdef CONFIG_COMPAT
+static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ get_device(&rpmb->dev);
+ filp->private_data = rpmb;
+ mmc_blk_get(rpmb->md->disk);
+
+ return nonseekable_open(inode, filp);
+}
+
+static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ put_device(&rpmb->dev);
+ mmc_blk_put(rpmb->md);
+
+ return 0;
+}
+
+static const struct file_operations mmc_rpmb_fileops = {
+ .release = mmc_rpmb_chrdev_release,
+ .open = mmc_rpmb_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = mmc_rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_rpmb_ioctl_compat,
+#endif
+};
+
+static void mmc_blk_rpmb_device_release(struct device *dev)
+{
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+
+ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ kfree(rpmb);
+}
+
+static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_index,
+ sector_t size,
+ const char *subname)
+{
+ int devidx, ret;
+ char rpmb_name[DISK_NAME_LEN];
+ char cap_str[10];
+ struct mmc_rpmb_data *rpmb;
+
+ /* This creates the minor number for the RPMB char device */
+ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return devidx;
+
+ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
+ if (!rpmb) {
+ ida_simple_remove(&mmc_rpmb_ida, devidx);
+ return -ENOMEM;
+ }
+
+ snprintf(rpmb_name, sizeof(rpmb_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ rpmb->id = devidx;
+ rpmb->part_index = part_index;
+ rpmb->dev.init_name = rpmb_name;
+ rpmb->dev.bus = &mmc_rpmb_bus_type;
+ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
+ rpmb->dev.parent = &card->dev;
+ rpmb->dev.release = mmc_blk_rpmb_device_release;
+ device_initialize(&rpmb->dev);
+ dev_set_drvdata(&rpmb->dev, rpmb);
+ rpmb->md = md;
+
+ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
+ rpmb->chrdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
+ if (ret) {
+ pr_err("%s: could not add character device\n", rpmb_name);
+ goto out_put_device;
+ }
+
+ list_add(&rpmb->node, &md->rpmbs);
+
+ string_get_size((u64)size, 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+
+ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
+ rpmb_name, mmc_card_id(card),
+ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
+ MAJOR(mmc_rpmb_devt), rpmb->id);
+
+ return 0;
+
+out_put_device:
+ put_device(&rpmb->dev);
+ return ret;
+}
+
+static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
+
+{
+ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
+ put_device(&rpmb->dev);
+}
+
/* MMC Physical partitions consist of two boot partitions and
* up to four general purpose partitions.
* For each partition enabled in EXT_CSD a block device will be allocated
@@ -2194,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int idx, ret = 0;
+ int idx, ret;
if (!mmc_card_mmc(card))
return 0;
for (idx = 0; idx < card->nr_parts; idx++) {
- if (card->part[idx].size) {
+ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
+ /*
+ * RPMB partitions do not provide block access; they
+ * are only accessed using ioctl()s. Thus create
+ * special RPMB block devices that do not have a
+ * backing block queue for these.
+ */
+ ret = mmc_blk_alloc_rpmb_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ } else if (card->part[idx].size) {
ret = mmc_blk_alloc_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
@@ -2212,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
}
}
- return ret;
+ return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
@@ -2249,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
{
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
+ struct mmc_rpmb_data *rpmb;
+ /* Remove RPMB partitions */
+ list_for_each_safe(pos, q, &md->rpmbs) {
+ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
+ list_del(pos);
+ mmc_blk_remove_rpmb_part(rpmb);
+ }
+ /* Remove block partitions */
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
@@ -2568,6 +2804,17 @@ static int __init mmc_blk_init(void)
{
int res;
+ res = bus_register(&mmc_rpmb_bus_type);
+ if (res < 0) {
+ pr_err("mmcblk: could not register RPMB bus type\n");
+ return res;
+ }
+ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
+ if (res < 0) {
+ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
+ goto out_bus_unreg;
+ }
+
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
@@ -2575,16 +2822,20 @@ static int __init mmc_blk_init(void)
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
- goto out;
+ goto out_chrdev_unreg;
res = mmc_register_driver(&mmc_driver);
if (res)
- goto out2;
+ goto out_blkdev_unreg;
return 0;
- out2:
+
+out_blkdev_unreg:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
- out:
+out_chrdev_unreg:
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+out_bus_unreg:
+ bus_unregister(&mmc_rpmb_bus_type);
return res;
}
@@ -2592,6 +2843,7 @@ static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
}
module_init(mmc_blk_init);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 301246513a37..a4b49e25fe96 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card)
*/
void mmc_remove_card(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
+
#ifdef CONFIG_DEBUG_FS
mmc_remove_card_debugfs(card);
#endif
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 66c9cf49ad2f..1f0f44f4dd5f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
}
if (mrq->cmd) {
- pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
- mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
- mrq->cmd->flags);
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
@@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
- mmc_mrq_pr_debug(host, mrq);
+ mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
@@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+EXPORT_SYMBOL(mmc_start_request);
/*
* mmc_wait_data_done() - done callback for data request
@@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning if needed and it is possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
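+
/*
 * Issuing sketch (not part of this patch): a block-layer caller handing a
 * prepared data request to the CQE. It assumes the mmc_queue_req/brq layout
 * from the block driver above is visible; the issue path itself is
 * hypothetical.
 */
static int example_cqe_issue(struct mmc_host *host, struct mmc_queue_req *mqrq)
{
	struct mmc_request *mrq = &mqrq->brq.mrq;

	/* mmc_blk_data_prep() has already filled mrq->tag and data->blk_addr */
	return mmc_cqe_start_req(host, mrq);
}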
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
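+
/*
 * Completion sketch (not part of this patch): a CQE host driver's threaded
 * interrupt handler returning finished requests to the core. The
 * my_cqe_pop_completed() helper is a hypothetical placeholder.
 */
static struct mmc_request *my_cqe_pop_completed(struct mmc_host *host); /* hypothetical */

static irqreturn_t my_cqe_irq_thread(int irq, void *data)
{
	struct mmc_host *host = data;
	struct mmc_request *mrq;

	/* hand back every request the controller has marked as done */
	while ((mrq = my_cqe_pop_completed(host)) != NULL)
		mmc_cqe_request_done(host, mrq);

	return IRQ_HANDLED;
}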
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected seldom, if at all, but it reduces performance,
+ * so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
@@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
}
EXPORT_SYMBOL(mmc_align_data_size);
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ return host->claimer == ctx ||
+ (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+ struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ if (!host->claimer) {
+ if (ctx)
+ host->claimer = ctx;
+ else
+ host->claimer = &host->default_ctx;
+ }
+ if (task)
+ host->claimer->task = task;
+}
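+
/*
 * Claiming sketch (not part of this patch): a queue-owned context lets every
 * request issued on behalf of that queue nest on the same claim, no matter
 * which task happens to run it. The example_queue structure is hypothetical;
 * only mmc_get_card()/mmc_put_card() and struct mmc_ctx come from this series.
 */
struct example_queue {
	struct mmc_card *card;
	struct mmc_ctx ctx;		/* embedded claimer context */
};

static void example_issue(struct example_queue *q)
{
	mmc_get_card(q->card, &q->ctx);	/* matches even from another task */
	/* ... issue one or more requests ... */
	mmc_put_card(q->card, &q->ctx);
}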
+
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
* @abort: whether or not the operation should be aborted
*
* Claim a host for a set of operations. If @abort is non null and
@@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size);
* that non-zero value without acquiring the lock. Returns zero
* with the lock held otherwise.
*/
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort)
{
+ struct task_struct *task = ctx ? NULL : current;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
@@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
- if (stop || !host->claimed || host->claimer == current)
+ if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
@@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
- host->claimer = current;
+ mmc_ctx_set_claimer(host, ctx, task);
host->claim_cnt += 1;
if (host->claim_cnt == 1)
pm = true;
@@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
+ host->claimer->task = NULL;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
@@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host);
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
-void mmc_get_card(struct mmc_card *card)
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
- mmc_claim_host(card->host);
+ __mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
@@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card);
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
-void mmc_put_card(struct mmc_card *card)
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
- mmc_release_host(card->host);
+ struct mmc_host *host = card->host;
+
+ WARN_ON(ctx && host->claimer != ctx);
+
+ mmc_release_host(host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
@@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
#endif /* CONFIG_REGULATOR */
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or an errno. An errno must be handled; it is either a critical
+ * error or -EPROBE_DEFER. 0 means no critical error, but it does not mean all
+ * regulators have been found, because they are all optional. If you require
+ * certain regulators, check separately in your driver whether they got
+ * populated after calling this function.
+ */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
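/*
 * Caller sketch (not part of this patch): following the documented semantics,
 * host drivers propagate any error and only afterwards probe the optional
 * regulators (compare the cavium/dw_mmc/meson hunks further down).
 */
static int example_probe_regulators(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;	/* critical error or -EPROBE_DEFER */

	if (IS_ERR(mmc->supply.vmmc))
		dev_dbg(mmc_dev(mmc), "no vmmc regulator found\n");

	return 0;
}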
@@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+ u32 clock;
+
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ return -EAGAIN;
+
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ return 0;
+}
+
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
struct mmc_command cmd = {};
int err = 0;
- u32 clock;
/*
* If we cannot switch voltages, return failure so the caller
@@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = -EAGAIN;
goto power_cycle;
}
- /*
- * During a signal voltage level switch, the clock must be gated
- * for 5 ms according to the SD spec
- */
- clock = host->ios.clock;
- host->ios.clock = 0;
- mmc_set_ios(host);
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
+ if (mmc_host_set_uhs_voltage(host)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
goto power_cycle;
}
- /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
- mmc_delay(10);
- host->ios.clock = clock;
- mmc_set_ios(host);
-
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca861091a776..71e6c6d7ceb7 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_host_set_uhs_voltage(struct mmc_host *host);
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
@@ -107,6 +108,8 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
+
struct mmc_async_req;
struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
@@ -128,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort);
void mmc_release_host(struct mmc_host *host);
-void mmc_get_card(struct mmc_card *card);
-void mmc_put_card(struct mmc_card *card);
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
/**
* mmc_claim_host - exclusively claim a host
@@ -141,7 +145,11 @@ void mmc_put_card(struct mmc_card *card);
*/
static inline void mmc_claim_host(struct mmc_host *host)
{
- __mmc_claim_host(host, NULL);
+ __mmc_claim_host(host, NULL, NULL);
}
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
#endif
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ad88deb2e8f3..35a9e4fd1a9f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -111,12 +111,6 @@ void mmc_retune_hold(struct mmc_host *host)
host->hold_retune += 1;
}
-void mmc_retune_hold_now(struct mmc_host *host)
-{
- host->retune_now = 0;
- host->hold_retune += 1;
-}
-
void mmc_retune_release(struct mmc_host *host)
{
if (host->hold_retune)
@@ -124,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host)
else
WARN_ON(1);
}
+EXPORT_SYMBOL(mmc_retune_release);
int mmc_retune(struct mmc_host *host)
{
@@ -184,7 +179,7 @@ static void mmc_retune_timer(unsigned long data)
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
- u32 bus_width;
+ u32 bus_width, drv_type;
int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -326,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ /* Must be after "non-removable" check */
+ if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ host->fixed_drv_type = drv_type;
+ else
+ dev_err(host->parent,
+ "can't use fixed driver type, media is removable\n");
+ }
+
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
@@ -398,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_SIZE / 512;
+ host->fixed_drv_type = -EINVAL;
+
return host;
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 77d6f60d1bf9..fb689a1065ed 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -19,12 +19,17 @@ void mmc_unregister_host_class(void);
void mmc_retune_enable(struct mmc_host *host);
void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
-void mmc_retune_hold_now(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
+static inline void mmc_retune_hold_now(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->hold_retune += 1;
+}
+
static inline void mmc_retune_recheck(struct mmc_host *host)
{
if (host->hold_retune <= 1)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 36217ad5e9b1..a552f61060d2 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
@@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_rev.attr,
&dev_attr_pre_eol_info.attr,
&dev_attr_life_time.attr,
&dev_attr_serial.attr,
@@ -1289,13 +1291,18 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
+ int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
+ if (fixed_drv_type >= 0)
+ drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
+ ? fixed_drv_type : 0;
+ else
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
card->drive_strength = drive_strength;
@@ -1786,12 +1793,41 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable Command Queue if supported. Note that Packed Commands cannot
+ * be used with Command Queue.
+ */
+ card->ext_csd.cmdq_en = false;
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ err = mmc_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ err = 0;
+ }
+ }
+ /*
* In some cases (e.g. RPMB or mmc_test), the Command Queue must be
* disabled for a time, so a flag is needed to indicate to re-enable the
* Command Queue.
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
+ if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (err) {
+ pr_err("%s: Failed to enable CQE, error %d\n",
+ mmc_hostname(host), err);
+ } else {
+ host->cqe_enabled = true;
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1911,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_remove(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 54686ca4bfb7..908e4db03535 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -977,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
from_exception)
return;
- mmc_claim_host(card->host);
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
timeout = MMC_OPS_TIMEOUT_MS;
use_busy_signal = true;
@@ -995,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
- goto out;
+ return;
}
/*
@@ -1007,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_card_set_doing_bkops(card);
else
mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
}
+EXPORT_SYMBOL(mmc_start_bkops);
/*
* Flush the cache to the non-volatile storage.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 0a4e77a5ba33..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ if (mq && mmc_card_removed(mq->card))
return BLKPREP_KILL;
req->rq_flags |= RQF_DONTPREP;
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ /* Initialize thread_sem even if it is not used */
+ sema_init(&mq->thread_sem, 1);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
- u64 limit = BLK_BOUNCE_HIGH;
int ret = -ENOMEM;
- if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
}
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
- if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
- sema_init(&mq->thread_sem, 1);
+ mmc_setup_queue(mq, card);
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 6bfba32ffa66..547b457c4251 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -36,12 +36,14 @@ struct mmc_blk_request {
/**
* enum mmc_drv_op - enumerates the operations in the mmc_queue_req
* @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
* @MMC_DRV_OP_BOOT_WP: write protect boot partitions
* @MMC_DRV_OP_GET_CARD_STATUS: get card status
* @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
*/
enum mmc_drv_op {
MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_IOCTL_RPMB,
MMC_DRV_OP_BOOT_WP,
MMC_DRV_OP_GET_CARD_STATUS,
MMC_DRV_OP_GET_EXT_CSD,
@@ -82,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern int mmc_access_rpmb(struct mmc_queue *);
-
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4fd1620b732d..45bf78f32716 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static bool mmc_sd_card_using_v18(struct mmc_card *card)
+{
+ /*
+ * According to the SD spec., the Bus Speed Mode (function group 1) bits
+ * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
+ * they can be used to determine if the card has already switched to
+ * 1.8V signaling.
+ */
+ return card->sw_caps.sd3_bus_mode &
+ (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
int err;
u32 cid[4];
u32 rocr = 0;
+ bool v18_fixup_failed = false;
WARN_ON(!host->claimed);
-
+retry:
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
+ /*
+ * If the card has not been power cycled, it may still be using 1.8V
+ * signaling. Detect that situation and try to initialize a UHS-I (1.8V)
+ * transfer mode.
+ */
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ mmc_sd_card_using_v18(card) &&
+ host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+ /*
+ * Re-read switch information in case it has changed since
+ * oldcard was initialized.
+ */
+ if (oldcard) {
+ err = mmc_read_switch(card);
+ if (err)
+ goto free_card;
+ }
+ if (mmc_sd_card_using_v18(card)) {
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
+ }
+ goto done;
+ }
+ }
+
/* Initialization sequence for UHS-I cards */
if (rocr & SD_ROCR_S18A) {
err = mmc_sd_init_uhs_card(card);
@@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+done:
host->card = card;
return 0;
@@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_sd_remove(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c771843e4c15..7a2eaf8410a3 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host)
* holding of the host lock does not cover too much work
* that doesn't require that lock to be held.
*/
- ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ ret = __mmc_claim_host(host, NULL,
+ &host->sdio_irq_thread_abort);
if (ret)
break;
ret = process_sdio_pending_irqs(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8c15637178ff..567028c9219a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -352,6 +352,19 @@ config MMC_MESON_GX
If you have a controller with this interface, say Y here.
+config MMC_MESON_MX_SDIO
+ tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on HAS_DMA
+ depends on OF
+ help
+ This selects support for the SD/MMC Host Controller on
+ Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
@@ -429,6 +442,7 @@ config MMC_SDHCI_MSM
tristate "Qualcomm SDHCI Controller Support"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -663,7 +677,7 @@ config MMC_CAVIUM_OCTEON
config MMC_CAVIUM_THUNDERX
tristate "Cavium ThunderX SD/MMC Card Interface support"
depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- depends on GPIOLIB
+ depends on GPIO_THUNDERX
depends on OF_ADDRESS
help
This selects Cavium ThunderX SD/MMC Card Interface.
@@ -899,3 +913,15 @@ config MMC_SDHCI_XENON
This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
+ depends on MMC_SDHCI_PLTFM && OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in TI's DRA7 SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7c7b29ff591a..a43cf0d5a5d3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
+obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -90,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
+obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0a0ebf3a096d..e55f3932d580 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -732,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
return 0;
}
-static void atmci_timeout_timer(unsigned long data)
+static void atmci_timeout_timer(struct timer_list *t)
{
struct atmel_mci *host;
- host = (struct atmel_mci *)data;
+ host = from_timer(host, t, timer);
dev_dbg(&host->pdev->dev, "software timeout\n");
@@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = 0;
}
-static void atmci_detect_change(unsigned long data)
+static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
+ struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
bool present;
bool present_old;
@@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host,
if (gpio_is_valid(slot->detect_pin)) {
int ret;
- setup_timer(&slot->detect_timer, atmci_detect_change,
- (unsigned long)slot);
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpio_to_irq(slot->detect_pin),
atmci_detect_interrupt,
@@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, atmci_timeout_timer, 0);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index fbd29f00fca0..ed5cefb83768 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -967,7 +967,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/*
* Legacy Octeon firmware has no regulator entry, fall-back to
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 64cda84b2302..73fd75c3c824 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -75,7 +75,7 @@ struct hs_timing {
u32 smpl_phase_min;
};
-struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
+static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
{ /* reserved */ },
{ /* SD */
{7, 0, 15, 15,}, /* 0: LEGACY 400k */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4f2806720c5c..0aa39975f33b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -817,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
@@ -1024,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
@@ -1938,6 +1938,7 @@ static void dw_mci_set_drto(struct dw_mci *host)
unsigned int drto_clks;
unsigned int drto_div;
unsigned int drto_ms;
+ unsigned long irqflags;
drto_clks = mci_readl(host, TMOUT) >> 8;
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
@@ -1949,7 +1950,11 @@ static void dw_mci_set_drto(struct dw_mci *host)
/* add a bit spare time */
drto_ms += 10;
- mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ mod_timer(&host->dto_timer,
+ jiffies + msecs_to_jiffies(drto_ms));
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
@@ -1970,6 +1975,18 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
return true;
}
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ return false;
+
+ /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+ WARN_ON(del_timer_sync(&host->dto_timer));
+ clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+ return true;
+}
+
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
@@ -2111,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!dw_mci_clear_pending_data_complete(host)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
@@ -2682,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2694,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_RXDR) {
@@ -2791,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto err_host_allocated;
if (!mmc->ocr_avail)
@@ -2971,9 +2991,9 @@ no_dma:
host->use_dma = TRANS_MODE_PIO;
}
-static void dw_mci_cmd11_timer(unsigned long arg)
+static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2985,9 +3005,9 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
-static void dw_mci_cto_timer(unsigned long arg)
+static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cto_timer);
unsigned long irqflags;
u32 pending;
@@ -3040,10 +3060,34 @@ exit:
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
-static void dw_mci_dto_timer(unsigned long arg)
+static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, dto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ /*
+ * The DTO timer is much longer than the CTO timer, so it's even less
+ * likely that we'll hit these cases, but it pays to be paranoid.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & SDMMC_INT_DATA_OVER) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected data interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "DTO timeout when already completed\n");
+ goto exit;
+ }
+
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
@@ -3058,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
break;
default:
+ dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+ host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
@@ -3208,14 +3257,9 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- setup_timer(&host->cmd11_timer,
- dw_mci_cmd11_timer, (unsigned long)host);
-
- setup_timer(&host->cto_timer,
- dw_mci_cto_timer, (unsigned long)host);
-
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
+ timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
+ timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 34474ad731aa..e3124f06a47e 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -74,7 +74,8 @@ struct dw_mci_dma_slave {
* @stop_abort: The command currently prepared for stoping transfer.
* @prev_blksz: The former transfer blksz record.
* @timing: Record of current ios timing.
- * @use_dma: Whether DMA channel is initialized or not.
+ * @use_dma: Which DMA transfer mode is in use for the current transfer;
+ * zero denotes PIO mode.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
* @sg_dma: Bus address of DMA buffer.
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 7db8c7a8d38d..712e08d9a45e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -586,9 +586,9 @@ poll_timeout:
return true;
}
-static void jz4740_mmc_timeout(unsigned long data)
+static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+ struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
@@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_reset(host);
jz4740_mmc_clock_disable(host);
- setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
- (unsigned long)host);
+ timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 85745ef179e2..e0862d3f65b3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1190,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* Get regulators and the supported OCR mask */
host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto free_host;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
new file mode 100644
index 000000000000..09cb89645d06
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -0,0 +1,768 @@
+/*
+ * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#define MESON_MX_SDIO_ARGU 0x00
+
+#define MESON_MX_SDIO_SEND 0x04
+ #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0)
+ #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8)
+ #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16)
+ #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17)
+ #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18)
+ #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19)
+ #define MESON_MX_SDIO_SEND_DATA BIT(20)
+ #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21)
+ #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24)
+
+#define MESON_MX_SDIO_CONF 0x08
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10
+ #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10)
+ #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11)
+ #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12)
+ #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18)
+ #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19)
+ #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20)
+ #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21)
+ #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23)
+ #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29)
+
+#define MESON_MX_SDIO_IRQS 0x0c
+ #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0)
+ #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4)
+ #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5)
+ #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6)
+ #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7)
+ #define MESON_MX_SDIO_IRQS_IF_INT BIT(8)
+ #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9)
+ #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16)
+ #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17)
+ #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19)
+
+#define MESON_MX_SDIO_IRQC 0x10
+ #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3)
+ #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4)
+ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
+ #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
+ #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
+ #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
+
+#define MESON_MX_SDIO_MULT 0x14
+ #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3)
+ #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4)
+ #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5)
+ #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8)
+ #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10)
+ #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11)
+ #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12)
+
+#define MESON_MX_SDIO_ADDR 0x18
+
+#define MESON_MX_SDIO_EXT 0x1c
+ #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16)
+
+#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024)
+#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
+#define MESON_MX_SDIO_MAX_SLOTS 3
+
+struct meson_mx_mmc_host {
+ struct device *controller_dev;
+
+ struct clk *parent_clk;
+ struct clk *core_clk;
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+ struct clk_fixed_factor fixed_factor;
+ struct clk *fixed_factor_clk;
+
+ void __iomem *base;
+ int irq;
+ spinlock_t irq_lock;
+
+ struct timer_list cmd_timeout;
+
+ unsigned int slot_id;
+ struct mmc_host *mmc;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ int error;
+};
+
+static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
+ u32 val)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 regval;
+
+ regval = readl(host->base + reg);
+ regval &= ~mask;
+ regval |= (val & mask);
+
+ writel(regval, host->base + reg);
+}
+
+static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
+{
+ writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
+ udelay(2);
+}
+
+static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
+ return cmd->mrq->cmd;
+ else if (mmc_op_multi(cmd->opcode) &&
+ (!cmd->mrq->sbc || cmd->error || cmd->data->error))
+ return cmd->mrq->stop;
+ else
+ return NULL;
+}
+
+static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned int pack_size;
+ unsigned long irqflags, timeout;
+ u32 mult, send = 0, ext = 0;
+
+ host->cmd = cmd;
+
+ if (cmd->busy_timeout)
+ timeout = msecs_to_jiffies(cmd->busy_timeout);
+ else
+ timeout = msecs_to_jiffies(1000);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
+ break;
+ case MMC_RSP_R2:
+ /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133);
+ send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8;
+ break;
+ default:
+ break;
+ }
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY;
+
+ if (cmd->data) {
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ (cmd->data->blocks - 1));
+
+ pack_size = cmd->data->blksz * BITS_PER_BYTE;
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4;
+ else
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1;
+
+ ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ pack_size);
+
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ send |= MESON_MX_SDIO_SEND_DATA;
+ else
+ send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA;
+
+ cmd->data->bytes_xfered = 0;
+ }
+
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK,
+ (0x40 | cmd->opcode));
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
+ mult |= BIT(31);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ /* enable the CMD done interrupt */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
+
+ /* clear pending interrupts */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
+ MESON_MX_SDIO_IRQS_CMD_INT,
+ MESON_MX_SDIO_IRQS_CMD_INT);
+
+ writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
+ writel(ext, host->base + MESON_MX_SDIO_EXT);
+ writel(send, host->base + MESON_MX_SDIO_SEND);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ mod_timer(&host->cmd_timeout, jiffies + timeout);
+}
+
+static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
+{
+ struct mmc_request *mrq;
+
+ mrq = host->mrq;
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned short vdd = ios->vdd;
+ unsigned long clk_rate = ios->clock;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
+ break;
+
+ case MMC_BUS_WIDTH_8:
+ default:
+ dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
+ ios->bus_width);
+ host->error = -EINVAL;
+ return;
+ }
+
+ host->error = clk_set_rate(host->cfg_div_clk, ios->clock);
+ if (host->error) {
+ dev_warn(mmc_dev(mmc),
+ "failed to set MMC clock to %lu: %d\n",
+ clk_rate, host->error);
+ return;
+ }
+
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ vdd = 0;
+ /* fall-through: */
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->error = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ vdd);
+ if (host->error)
+ return;
+ }
+ break;
+ }
+}
+
+static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ int dma_len;
+ struct scatterlist *sg;
+
+ if (!data)
+ return 0;
+
+ sg = data->sg;
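+ /* the hardware can only transfer buffers whose offset and length are 32-bit aligned */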
+ if (sg->offset & 3 || sg->length & 3) {
+ dev_err(mmc_dev(mmc),
+ "unaligned scatterlist: offset %x length %d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+
+ dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (dma_len <= 0) {
+ dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (!host->error)
+ host->error = meson_mx_mmc_map_dma(mmc, mrq);
+
+ if (host->error) {
+ cmd->error = host->error;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ writel(sg_dma_address(mrq->data->sg),
+ host->base + MESON_MX_SDIO_ADDR);
+
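+ /* if the request carries a SET_BLOCK_COUNT (CMD23) command, send it first */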
+ if (mrq->sbc)
+ meson_mx_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mx_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+
+ return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
+}
+
+static void meson_mx_mmc_read_response(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 mult;
+ int i, resp[4];
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
+ mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
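+ /* 136-bit (R2) responses are spread across four reads of the response register */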
+ if (cmd->flags & MMC_RSP_136) {
+ for (i = 0; i <= 3; i++)
+ resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
+ cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
+ cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
+ cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
+ cmd->resp[3] = (resp[3] << 8);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
+ }
+}
+
+static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host,
+ u32 irqs, u32 send)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ /*
+ * NOTE: even though it shouldn't happen, we sometimes get command
+ * interrupts twice (at least that is what it looks like). Ideally we
+ * should find out why this happens and warn here as soon as it occurs.
+ */
+ if (!cmd)
+ return IRQ_HANDLED;
+
+ cmd->error = 0;
+ meson_mx_mmc_read_response(host->mmc, cmd);
+
+ if (cmd->data) {
+ if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) ||
+ (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK)))
+ cmd->error = -EILSEQ;
+ } else {
+ if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) ||
+ (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7)))
+ cmd->error = -EILSEQ;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
+{
+ struct meson_mx_mmc_host *host = (void *) data;
+ u32 irqs, send;
+ unsigned long irqflags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ irqs = readl(host->base + MESON_MX_SDIO_IRQS);
+ send = readl(host->base + MESON_MX_SDIO_SEND);
+
+ if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
+ ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
+ else
+ ret = IRQ_HANDLED;
+
+ /* finally ACK all pending interrupts */
+ writel(irqs, host->base + MESON_MX_SDIO_IRQS);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ return ret;
+}
+
+static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
+{
+ struct meson_mx_mmc_host *host = (void *) irq_data;
+ struct mmc_command *cmd = host->cmd, *next_cmd;
+
+ if (WARN_ON(!cmd))
+ return IRQ_HANDLED;
+
+ del_timer_sync(&host->cmd_timeout);
+
+ if (cmd->data) {
+ dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+ cmd->data->sg_len,
+ mmc_get_dma_dir(cmd->data));
+
+ cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+ }
+
+ next_cmd = meson_mx_mmc_get_next_cmd(cmd);
+ if (next_cmd)
+ meson_mx_mmc_start_cmd(host->mmc, next_cmd);
+ else
+ meson_mx_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_mx_mmc_timeout(struct timer_list *t)
+{
+ struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ unsigned long irqflags;
+ u32 irqc;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /* disable the CMD interrupt */
+ irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+ irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
+ writel(irqc, host->base + MESON_MX_SDIO_IRQC);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ /*
+ * skip the timeout handling if the interrupt handler already processed
+ * the command.
+ */
+ if (!host->cmd)
+ return;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
+ host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
+ readl(host->base + MESON_MX_SDIO_ARGU));
+
+ host->cmd->error = -ETIMEDOUT;
+
+ meson_mx_mmc_request_done(host);
+}
+
+static struct mmc_host_ops meson_mx_mmc_ops = {
+ .request = meson_mx_mmc_request,
+ .set_ios = meson_mx_mmc_set_ios,
+ .card_busy = meson_mx_mmc_card_busy,
+ .get_cd = mmc_gpio_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+};
+
+static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
+{
+ struct device_node *slot_node;
+
+ /*
+ * TODO: the MMC core framework currently does not properly support
+ * controllers with multiple slots, so we only register the first
+ * slot for now.
+ */
+ slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+ if (!slot_node) {
+ dev_warn(parent, "no 'mmc-slot' sub-node found\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ return of_platform_device_create(slot_node, NULL, parent);
+}
+
+static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *slot_dev = mmc_dev(mmc);
+ int ret;
+
+ if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
+ dev_err(slot_dev, "missing 'reg' property\n");
+ return -EINVAL;
+ }
+
+ if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
+ dev_err(slot_dev, "invalid 'reg' property value %d\n",
+ host->slot_id);
+ return -EINVAL;
+ }
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ return ret;
+
+ mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
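+ /*
+ * The block count and block size limits are derived from the register
+ * fields; the block size field counts bits and includes the per-line CRC16.
+ */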
+ mmc->max_blk_count =
+ FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ 0xffffffff);
+ mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ 0xffffffff);
+ mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS);
+ mmc->max_blk_size /= BITS_PER_BYTE;
+
+ /* Get the min and max supported clock rates */
+ mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
+ mmc->f_max = clk_round_rate(host->cfg_div_clk,
+ clk_get_rate(host->parent_clk));
+
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->ops = &meson_mx_mmc_ops;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+{
+ struct clk_init_data init;
+ const char *clk_div_parent, *clk_fixed_factor_parent;
+
+ clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#fixed_factor",
+ dev_name(host->controller_dev));
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ init.parent_names = &clk_fixed_factor_parent;
+ init.num_parents = 1;
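+ /* the fixed factor clock runs at half the parent clock rate */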
+ host->fixed_factor.div = 2;
+ host->fixed_factor.mult = 1;
+ host->fixed_factor.hw.init = &init;
+
+ host->fixed_factor_clk = devm_clk_register(host->controller_dev,
+ &host->fixed_factor.hw);
+ if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
+ return PTR_ERR(host->fixed_factor_clk);
+
+ clk_div_parent = __clk_get_name(host->fixed_factor_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#div", dev_name(host->controller_dev));
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &clk_div_parent;
+ init.num_parents = 1;
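+ /* the configurable divider is the CMD_CLK_DIV field in the CONF register */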
+ host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
+ host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
+ host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->controller_dev,
+ &host->cfg_div.hw);
+ if (WARN_ON(IS_ERR(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ return 0;
+}
+
+static int meson_mx_mmc_probe(struct platform_device *pdev)
+{
+ struct platform_device *slot_pdev;
+ struct mmc_host *mmc;
+ struct meson_mx_mmc_host *host;
+ struct resource *res;
+ int ret, irq;
+ u32 conf;
+
+ slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
+ if (!slot_pdev)
+ return -ENODEV;
+ else if (IS_ERR(slot_pdev))
+ return PTR_ERR(slot_pdev);
+
+ mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto error_unregister_slot_pdev;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->controller_dev = &pdev->dev;
+
+ spin_lock_init(&host->irq_lock);
+ timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0);
+
+ platform_set_drvdata(pdev, host);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(host->controller_dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto error_free_mmc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(host->controller_dev, irq,
+ meson_mx_mmc_irq,
+ meson_mx_mmc_irq_thread, IRQF_ONESHOT,
+ NULL, host);
+ if (ret)
+ goto error_free_mmc;
+
+ host->core_clk = devm_clk_get(host->controller_dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto error_free_mmc;
+ }
+
+ host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
+ if (IS_ERR(host->parent_clk)) {
+ ret = PTR_ERR(host->parent_clk);
+ goto error_free_mmc;
+ }
+
+ ret = meson_mx_mmc_register_clks(host);
+ if (ret)
+ goto error_free_mmc;
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable core clock\n");
+ goto error_free_mmc;
+ }
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable MMC clock\n");
+ goto error_disable_core_clk;
+ }
+
+ conf = 0;
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
+ writel(conf, host->base + MESON_MX_SDIO_CONF);
+
+ meson_mx_mmc_soft_reset(host);
+
+ ret = meson_mx_mmc_add_host(host);
+ if (ret)
+ goto error_disable_clks;
+
+ return 0;
+
+error_disable_clks:
+ clk_disable_unprepare(host->cfg_div_clk);
+error_disable_core_clk:
+ clk_disable_unprepare(host->core_clk);
+error_free_mmc:
+ mmc_free_host(mmc);
+error_unregister_slot_pdev:
+ of_platform_device_destroy(&slot_pdev->dev, NULL);
+ return ret;
+}
+
+static int meson_mx_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
+ struct device *slot_dev = mmc_dev(host->mmc);
+
+ del_timer_sync(&host->cmd_timeout);
+
+ mmc_remove_host(host->mmc);
+
+ of_platform_device_destroy(slot_dev, NULL);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_mmc_of_match[] = {
+ { .compatible = "amlogic,meson8-sdio", },
+ { .compatible = "amlogic,meson8b-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
+
+static struct platform_driver meson_mx_mmc_driver = {
+ .probe = meson_mx_mmc_probe,
+ .remove = meson_mx_mmc_remove,
+ .driver = {
+ .name = "meson-mx-sdio",
+ .of_match_table = of_match_ptr(meson_mx_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mx_mmc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f1f54a818489..e8a1bb1ae694 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev,
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 267f7ab08420..6457a7d8880f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -67,6 +67,7 @@
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
+#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define MSDC_DMA_SA 0x90
@@ -74,10 +75,14 @@
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
+#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
+#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
+#define EMMC50_CFG3 0x220
+#define SDC_FIFO_CFG 0x228
/*--------------------------------------------------------------------------*/
/* Register Mask */
@@ -95,6 +100,9 @@
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
/* MSDC_IOCON mask */
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
@@ -183,6 +191,9 @@
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+/* SDC_ADV_CFG0 mask */
+#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+
/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
@@ -212,11 +223,22 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
+
+#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
+#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
+#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
+
#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
@@ -228,6 +250,11 @@
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
+#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+
+#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -290,9 +317,23 @@ struct msdc_save_para {
u32 pad_tune;
u32 patch_bit0;
u32 patch_bit1;
+ u32 patch_bit2;
u32 pad_ds_tune;
u32 pad_cmd_tune;
u32 emmc50_cfg0;
+ u32 emmc50_cfg3;
+ u32 sdc_fifo_cfg;
+};
+
+struct mtk_mmc_compatible {
+ u8 clk_div_bits;
+ bool hs400_tune; /* only used for MT8173 */
+ u32 pad_tune_reg;
+ bool async_fifo;
+ bool data_tune;
+ bool busy_check;
+ bool stop_clk_fix;
+ bool enhance_rx;
};
struct msdc_tune_para {
@@ -309,6 +350,7 @@ struct msdc_delay_phase {
struct msdc_host {
struct device *dev;
+ const struct mtk_mmc_compatible *dev_comp;
struct mmc_host *mmc; /* mmc structure */
int cmd_rsp;
@@ -334,11 +376,13 @@ struct msdc_host {
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
+ struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
+ u32 latch_ck;
u32 hs400_ds_delay;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
@@ -350,6 +394,59 @@ struct msdc_host {
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
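+/* per-SoC feature set: clock divider width, pad tune register location, async FIFO support, etc. */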
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt8173_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2712_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+};
+
+static const struct of_device_id msdc_of_ids[] = {
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
+ { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
+
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
@@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
- sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD, &mode);
+ else
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA, &mode);
/* DDR mode will double the clk cycles for data timeout */
timeout = mode >= 2 ? timeout * 2 : timeout;
timeout = timeout > 1 ? timeout - 1 : 0;
@@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
static void msdc_gate_clock(struct msdc_host *host)
{
+ clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->h_clk);
}
@@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
{
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->src_clk);
+ clk_prepare_enable(host->src_clk_cg);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
}
@@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 flags;
u32 div;
u32 sclk;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
- sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_clr_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
timing == MMC_TIMING_MMC_HS400) {
@@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
hz >= (host->src_clk_freq >> 1)) {
- sdr_set_bits(host->base + MSDC_CFG,
- MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
sclk = host->src_clk_freq >> 1;
div = 0; /* div is ignored when bit18 is set */
}
@@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sclk = (host->src_clk_freq >> 2) / div;
}
}
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | div);
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ /*
+ * As src_clk and HCLK use the same bit to gate/ungate them,
+ * gating only src_clk requires gating its parent (the mux).
+ */
+ if (host->src_clk_cg)
+ clk_disable_unprepare(host->src_clk_cg);
+ else
+ clk_disable_unprepare(clk_get_parent(host->src_clk));
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+ (mode << 8) | div);
+ else
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
+ (mode << 12) | div);
+ if (host->src_clk_cg)
+ clk_prepare_enable(host->src_clk_cg);
+ else
+ clk_prepare_enable(clk_get_parent(host->src_clk));
+
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
host->sclk = sclk;
host->mclk = hz;
host->timing = timing;
@@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
*/
if (host->sclk <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->def_tune_para.pad_tune, host->base + tune_reg);
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
}
- if (timing == MMC_TIMING_MMC_HS400)
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ host->dev_comp->hs400_tune)
sdr_set_field(host->base + PAD_CMD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
@@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host)
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
- writel(0, host->base + MSDC_PAD_TUNE);
+ writel(0, host->base + tune_reg);
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+ writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+ if (host->dev_comp->stop_clk_fix) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_STOP_DLY, 3);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_RDVALIDSEL);
+ }
+
+ if (host->dev_comp->busy_check)
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+
+ if (host->dev_comp->async_fifo) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPWAIT, 3);
+ if (host->dev_comp->enhance_rx) {
+ sdr_set_bits(host->base + SDC_ADV_CFG0,
+ SDC_RX_ENHANCE_EN);
+ } else {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPSTSENSEL, 2);
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_CRCSTSENSEL, 2);
+ }
+ /* when using the async FIFO there is no need to tune the internal delay */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGRESP);
+ sdr_set_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGCRCSTS);
+ }
+
+ if (host->dev_comp->data_tune) {
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+ } else {
+ /* choose clock tune */
+ sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+ }
+
/* Configure to enable SDIO mode.
* It is mandatory, otherwise SDIO CMD5 fails.
*/
@@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
u32 internal_delay = 0;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int cmd_err;
int i, j;
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
+ if (final_rise_delay.maxlen >= 12 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1403,20 +1581,20 @@ skip_fall:
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
- if (host->hs200_cmd_int_delay)
+ if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
@@ -1424,7 +1602,7 @@ skip_fall:
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
internal_delay_phase.final_phase);
skip_internal:
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
@@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int i, ret;
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+ host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1519,14 +1700,14 @@ skip_fall:
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
@@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->hs400_mode)
+ if (host->hs400_mode &&
+ host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
else
ret = msdc_tune_response(mmc, opcode);
@@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
return ret;
}
@@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ /* this bit must be cleared (set to 0) in HS400 mode */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ /* to improve read performance, set the number of outstanding writes to 2 */
+ sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
+
return 0;
}
@@ -1596,6 +1784,9 @@ static const struct mmc_host_ops mt_msdc_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
+ &host->latch_ck);
+
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
@@ -1617,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct msdc_host *host;
struct resource *res;
+ const struct of_device_id *of_id;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
+
+ of_id = of_match_node(msdc_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
/* Allocate MMC host for this device */
mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
if (!mmc)
@@ -1641,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto host_free;
host->src_clk = devm_clk_get(&pdev->dev, "source");
@@ -1656,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* the source clock control gate is an optional clock */
+ host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
+ if (IS_ERR(host->src_clk_cg))
+ host->src_clk_cg = NULL;
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
@@ -1686,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
+ host->dev_comp = of_id->data;
host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ if (host->dev_comp->clk_div_bits == 8)
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
@@ -1788,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static void msdc_save_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
- host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->save_para.pad_tune = readl(host->base + tune_reg);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+ host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
+ host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
+ host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
}
static void msdc_restore_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
- writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->save_para.pad_tune, host->base + tune_reg);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+ writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
+ writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
+ writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
}
static int msdc_runtime_suspend(struct device *dev)
@@ -1839,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
-static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msdc_of_ids);
-
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
.remove = msdc_drv_remove,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 58d74b8d6c79..210247b3d11a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
return IRQ_NONE;
}
-static void mvsd_timeout_timer(unsigned long data)
+static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = (struct mvsd_host *)data;
+ struct mvsd_host *host = from_timer(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
@@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev)
goto out;
}
- setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, mvsd_timeout_timer, 0);
platform_set_drvdata(pdev, mmc);
ret = mmc_add_host(mmc);
if (ret)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 1d5418e4efae..5ff8ef7223cc 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -963,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static void mxcmci_watchdog(unsigned long data)
+static void mxcmci_watchdog(struct timer_list *t)
{
- struct mmc_host *mmc = (struct mmc_host *)data;
- struct mxcmci_host *host = mmc_priv(mmc);
+ struct mxcmci_host *host = from_timer(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -1075,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev)
dat3_card_detect = true;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto out_free;
if (!mmc->ocr_avail) {
@@ -1165,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free_dma;
}
- init_timer(&host->watchdog);
- host->watchdog.function = &mxcmci_watchdog;
- host->watchdog.data = (unsigned long)mmc;
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
mmc_add_host(mmc);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bd49f34d7654..adf32682f27a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work)
}
static void
-mmc_omap_cmd_timer(unsigned long data)
+mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
}
static void
-mmc_omap_clk_timer(unsigned long data)
+mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
tasklet_hi_schedule(&slot->cover_tasklet);
}
-static void mmc_omap_cover_timer(unsigned long arg)
+static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
+ struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
tasklet_schedule(&slot->cover_tasklet);
}
@@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->max_seg_size = mmc->max_req_size;
if (slot->pdata->get_cover_state != NULL) {
- setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
- (unsigned long)slot);
+ timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
(unsigned long)slot);
}
@@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
- setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
- (unsigned long) host);
+ timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0);
spin_lock_init(&host->clk_lock);
- setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
+ timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0);
spin_lock_init(&host->dma_lock);
spin_lock_init(&host->slot_lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 3b5e6d11069b..071693ebfe18 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -147,10 +147,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define VDD_1V8 1800000 /* 180000 uV */
-#define VDD_3V0 3000000 /* 300000 uV */
-#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -308,8 +304,7 @@ err_set_ocr:
return ret;
}
-static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
- int vdd)
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
{
int ret;
@@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
if (power_on) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0) {
- dev_err(host->dev, "pbias set voltage fail\n");
- return ret;
- }
-
if (host->pbias_enabled == 0) {
ret = regulator_enable(host->pbias);
if (ret) {
@@ -350,8 +334,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
- int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on)
{
struct mmc_host *mmc = host->mmc;
int ret = 0;
@@ -363,7 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- ret = omap_hsmmc_set_pbias(host, false, 0);
+ ret = omap_hsmmc_set_pbias(host, false);
if (ret)
return ret;
@@ -385,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (ret)
return ret;
- ret = omap_hsmmc_set_pbias(host, true, vdd);
+ ret = omap_hsmmc_set_pbias(host, true);
if (ret)
goto err_set_voltage;
} else {
@@ -462,7 +445,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/* Allow an aux regulator */
@@ -1220,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host, 1, vdd);
+ ret = omap_hsmmc_set_power(host, 1);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1621,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host, 0, 0);
+ omap_hsmmc_set_power(host, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8bae88a150fd..41cbe84c1d18 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -88,6 +88,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index df4465439e13..9ab10436e4b8 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -91,7 +91,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
- { .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
@@ -107,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-shmobile" },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 41b57713b620..0848dc0f882e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
- int err;
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
__func__, rx ? "RX" : "TX", sample_point);
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
if (rx)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPRX_CTL, 0x1F, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPRX_CTL,
+ PHASE_SELECT_MASK, sample_point);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPTX_CTL, 0x1F, sample_point);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, SD_VPTX_CTL,
+ PHASE_SELECT_MASK, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
return 0;
}
@@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
{
int err;
struct mmc_command cmd = {};
+ struct rtsx_pcr *pcr = host->pcr;
- err = sd_change_phase(host, sample_point, true);
- if (err < 0)
- return err;
+ sd_change_phase(host, sample_point, true);
+
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
cmd.opcode = opcode;
err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
@@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
/* Wait till SD DATA IDLE */
sd_wait_data_idle(host);
sd_clear_error(host);
+ rtsx_pci_write_register(pcr, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
return err;
}
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 08ae0ff13513..b988997a1e80 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -73,6 +73,7 @@ struct sdhci_acpi_slot {
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
+ size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
};
@@ -82,13 +83,118 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
+ unsigned long private[0] ____cacheline_aligned;
};
+static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+{
+ return (void *)c->private;
+}
+
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
return c->slot && (c->slot->flags & flag);
}
+enum {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_V18_SWITCH = 3,
+ INTEL_DSM_V33_SWITCH = 4,
+};
+
+struct intel_host {
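+ /* bitmask of the _DSM functions supported by the firmware (queried via INTEL_DSM_FNS) */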
+ u32 dsm_fns;
+};
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+ 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *result = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
+ size_t len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+ } else {
+ dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
+ __func__, fn, obj->type, obj->buffer.length);
+ err = -EINVAL;
+ }
+
+ ACPI_FREE(obj);
+
+ return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
+ struct mmc_host *mmc)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ if (err) {
+ pr_debug("%s: DSM not supported, error %d\n",
+ mmc_hostname(mmc), err);
+ return;
+ }
+
+ pr_debug("%s: DSM function mask %#x\n",
+ mmc_hostname(mmc), intel_host->dsm_fns);
+}
+
+static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ unsigned int fn;
+ u32 result = 0;
+ int err;
+
+ err = sdhci_start_signal_voltage_switch(mmc, ios);
+ if (err)
+ return err;
+
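+ /* notify the platform firmware (via _DSM) about the selected signal voltage */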
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ fn = INTEL_DSM_V33_SWITCH;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ fn = INTEL_DSM_V18_SWITCH;
+ break;
+ default:
+ return 0;
+ }
+
+ err = intel_dsm(intel_host, dev, fn, &result);
+ pr_debug("%s: %s DSM fn %u error %d result %u\n",
+ mmc_hostname(mmc), __func__, fn, err, result);
+
+ return 0;
+}
+
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -269,56 +375,26 @@ out:
return ret;
}
-static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, const char *hid,
+ const char *uid)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during emmc probe slot goes here */
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ struct sdhci_host *host = c->host;
if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- return 0;
-}
-
-static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
-
- if (!c || !c->host)
- return 0;
-
- /* Platform specific code during sdio probe slot goes here */
-
- return 0;
-}
-
-static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host || !c->slot)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during sd probe slot goes here */
-
if (hid && !strcmp(hid, "80865ACA"))
host->mmc_host_ops.get_cd = bxt_get_cd;
+ intel_dsm_init(intel_host, &pdev->dev, host->mmc);
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ intel_start_signal_voltage_switch;
+
return 0;
}
@@ -332,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
- .probe_slot = sdhci_acpi_emmc_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
@@ -343,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
- .probe_slot = sdhci_acpi_sdio_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
@@ -353,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
- .probe_slot = sdhci_acpi_sd_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
@@ -429,11 +508,13 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct sdhci_acpi_slot *slot;
struct acpi_device *device, *child;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
resource_size_t len;
+ size_t priv_size;
const char *hid;
const char *uid;
int err;
@@ -443,7 +524,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return -ENODEV;
hid = acpi_device_hid(device);
- uid = device->pnp.unique_id;
+ uid = acpi_device_uid(device);
+
+ slot = sdhci_acpi_get_slot(hid, uid);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
@@ -467,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
return -ENOMEM;
- host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ priv_size = slot ? slot->priv_size : 0;
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(hid, uid);
+ c->slot = slot;
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 56529c3d389a..0f589e26ee63 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -27,15 +28,14 @@
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
-#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
-#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
+#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
+#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
-#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
-#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
-#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
+#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
#define SDHCI_CDNS_HRS06_MODE_SD 0x0
#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
@@ -105,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u32 tmp;
int ret;
- tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
- (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
+ FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
writel(tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
@@ -189,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
/* The speed mode for eMMC is selected by HRS06 register */
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
- tmp |= mode;
+ tmp &= ~SDHCI_CDNS_HRS06_MODE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
@@ -199,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
u32 tmp;
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- return tmp & SDHCI_CDNS_HRS06_MODE_MASK;
+ return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -254,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
- if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
tmp = readl(reg);
- tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
- tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp &= ~SDHCI_CDNS_HRS06_TUNE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
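The Cadence hunk above swaps open-coded shift/mask arithmetic for the <linux/bitfield.h> helpers. A small self-contained sketch of the equivalence, using a made-up DEMO_WDATA field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_WDATA	GENMASK(15, 8)	/* field occupying bits 15..8 */

static u32 demo_pack(u32 reg, u8 data)
{
	/* same as: (reg & ~(0xffU << 8)) | ((u32)data << 8) */
	reg &= ~DEMO_WDATA;
	reg |= FIELD_PREP(DEMO_WDATA, data);
	return reg;
}

static u8 demo_unpack(u32 reg)
{
	/* same as: (reg >> 8) & 0xff */
	return FIELD_GET(DEMO_WDATA, reg);
}

FIELD_FIT(), used in sdhci_cdns_set_tune_val() above, is the matching range check for a value about to be packed into such a field.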
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fc73e56eb1e2..3fb7d2eec93f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -123,14 +123,17 @@
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
int pwr_irq; /* power irq */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll */
+ struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -138,6 +141,10 @@ struct sdhci_msm_host {
bool calibration_done;
u8 saved_tuning_phase;
bool use_cdclp533;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ wait_queue_head_t pwr_irq_wait;
+ bool pwr_irq_flag;
};
static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
@@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
struct mmc_ios curr_ios = host->mmc->ios;
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
int rc;
clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = clk_set_rate(msm_host->clk, clock);
+ rc = clk_set_rate(core_clk, clock);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), clock,
@@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
}
msm_host->clk_rate = clock;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ mmc_hostname(host->mmc), clk_get_rate(core_clk),
curr_ios.timing);
}
@@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
sdhci_msm_hs400(host, &mmc->ios);
}
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
+{
+ init_waitqueue_head(&msm_host->pwr_irq_wait);
+}
+
+static inline void sdhci_msm_complete_pwr_irq_wait(
+ struct sdhci_msm_host *msm_host)
+{
+ wake_up(&msm_host->pwr_irq_wait);
+}
+
+/*
+ * sdhci_msm_check_power_status API should be called when a register write
+ * can toggle the SDHCI IO bus ON/OFF or change the IO lines HIGH/LOW.
+ * The state to which the register write will change the IO lines should
+ * be passed as the req_type argument. This API checks whether the IO
+ * lines are already in the expected state and waits for the power irq
+ * only if a power irq is expected to be triggered, based on the current
+ * and the expected IO line state.
+ */
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ bool done = false;
+
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+
+ /*
+ * The IRQ for request type IO High/LOW will be generated when there
+ * is a state change in the 1.8V enable bit (bit 3) of the
+ * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0,
+ * which indicates 3.3V IO voltage. So, when the MMC core layer tries
+ * to set it to 3.3V before card detection happens, the IRQ doesn't
+ * get triggered as there is no state change in this bit. The driver
+ * already handles this case by raising the IO voltage level as part
+ * of the controller power-up sequence. Hence, check host->pwr to
+ * handle the case where an IO voltage high request is issued even
+ * before the controller is powered up.
+ */
+ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
+ pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
+ mmc_hostname(host->mmc), req_type);
+ return;
+ }
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ /*
+ * This is needed here to handle cases where a register write will
+ * not change the current bus state or IO level of the controller.
+ * In that case, no power irq will be triggered and we should
+ * not wait.
+ */
+ if (!done) {
+ if (!wait_event_timeout(msm_host->pwr_irq_wait,
+ msm_host->pwr_irq_flag,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ dev_warn(&msm_host->pdev->dev,
+ "%s: pwr_irq for req: (%d) timed out\n",
+ mmc_hostname(host->mmc), req_type);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
+
+static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
+static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 irq_status, irq_ack = 0;
+ int retry = 10;
+ int pwr_state = 0, io_level = 0;
+
irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
irq_status &= INT_MASK;
writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
- if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ /*
+ * There is a rare HW scenario where the first clear pulse could be
+ * lost when the actual reset and the clear/read of the status register
+ * happen at the same time. Hence, retry at least 10 times to make
+ * sure the status register is cleared. Otherwise, this will result in
+ * a spurious power IRQ, causing system instability.
+ */
+ while (irq_status & readl_relaxed(msm_host->core_mem +
+ CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+ pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
+ mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ WARN_ON(1);
+ break;
+ }
+ writel_relaxed(irq_status,
+ msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+
+ /* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
- if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ io_level = REQ_IO_HIGH;
irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
/*
* The driver has to acknowledge the interrupt, switch voltages and
@@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host)
* switches are handled by the sdhci core, so just report success.
*/
writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+
+ pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
+ irq_ack);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_msm_handle_pwr_irq(host, irq);
+ msm_host->pwr_irq_flag = 1;
+ sdhci_msm_complete_pwr_irq_wait(msm_host);
- sdhci_msm_voltage_switch(host);
return IRQ_HANDLED;
}
@@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
- return clk_round_rate(msm_host->clk, ULONG_MAX);
+ return clk_round_rate(core_clk, ULONG_MAX);
}
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
@@ -1092,6 +1236,69 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*
+ * Platform specific register write functions. This is so that, if any
+ * register write needs to be followed up by platform specific actions,
+ * they can be added here. These functions can sleep when writes
+ * to certain registers are performed.
+ * These functions rely on sdhci_set_ios not holding a spinlock.
+ */
+static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 req_type = 0;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL2:
+ req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
+ REQ_IO_HIGH;
+ break;
+ case SDHCI_SOFTWARE_RESET:
+ if (host->pwr && (val & SDHCI_RESET_ALL))
+ req_type = REQ_BUS_OFF;
+ break;
+ case SDHCI_POWER_CONTROL:
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
+ break;
+ }
+
+ if (req_type) {
+ msm_host->pwr_irq_flag = 0;
+ /*
+ * Since this register write may trigger a power irq, ensure
+ * all previous register writes are complete by this point.
+ */
+ mb();
+ }
+ return req_type;
+}
+
+/* This function may sleep */
+static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+ writew_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+/* This function may sleep */
+static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+
+ writeb_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = {
.get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
- .voltage_switch = sdhci_msm_voltage_switch,
+ .write_w = sdhci_msm_writew,
+ .write_b = sdhci_msm_writeb,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
struct resource *core_memres;
+ struct clk *clk;
int ret;
u16 host_version, core_minor;
u32 core_version, config;
@@ -1160,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
+ clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
-
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ msm_host->bulk_clks[1].clk = clk;
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(msm_host->clk)) {
- ret = PTR_ERR(msm_host->clk);
+ clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
- goto pclk_disable;
+ goto bus_clk_disable;
}
+ msm_host->bulk_clks[0].clk = clk;
+
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
+ clk = devm_clk_get(&pdev->dev, "cal");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[2].clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, "sleep");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[3].clk = clk;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ if (ret)
+ goto bus_clk_disable;
/*
* xo clock is needed for FLL feature of cm_dll.
@@ -1189,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
- ret = clk_prepare_enable(msm_host->clk);
- if (ret)
- goto pclk_disable;
-
core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
@@ -1251,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /*
+ * The power-on reset state may trigger a power irq if the previous
+ * status of PWRCTL was either BUS_ON or IO_HIGH_V. So, before the pwr
+ * irq interrupt is enabled in the GIC, any pending power irq should
+ * be acknowledged. Otherwise the power irq interrupt handler would
+ * fire prematurely.
+ */
+ sdhci_msm_handle_pwr_irq(host, 0);
+
+ /*
+ * Ensure that the above writes are propagated before the interrupt
+ * is enabled in the GIC.
+ */
+ mb();
+
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
@@ -1260,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ sdhci_msm_init_pwr_irq_wait(msm_host);
+ /* Enable pwr irq interrupts */
+ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
@@ -1290,9 +1527,8 @@ pm_runtime_disable:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
-pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
@@ -1315,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
@@ -1330,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
return 0;
}
@@ -1341,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = clk_prepare_enable(msm_host->clk);
- if (ret) {
- dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret) {
- dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
- clk_disable_unprepare(msm_host->clk);
- return ret;
- }
-
- return 0;
+ return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
}
#endif
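The power-irq handshake introduced above reduces to a flag plus a waitqueue: the register-write path clears the flag before a write that may raise the irq and then sleeps, while the threaded handler sets the flag and wakes the waiter. A stripped-down sketch of that pattern (demo_* names are illustrative, not the driver's API):

#include <linux/wait.h>
#include <linux/jiffies.h>

struct demo_ctx {
	wait_queue_head_t	pwr_wait;
	bool			pwr_flag;
};

/* writer side: arm the flag, do the write, then wait (may sleep) */
static bool demo_write_and_wait(struct demo_ctx *ctx, unsigned int timeout_ms)
{
	ctx->pwr_flag = false;
	/* ... register write that may trigger the power irq ... */
	return wait_event_timeout(ctx->pwr_wait, ctx->pwr_flag,
				  msecs_to_jiffies(timeout_ms)) != 0;
}

/* irq side: record completion and wake any waiter */
static void demo_irq_done(struct demo_ctx *ctx)
{
	ctx->pwr_flag = true;
	wake_up(&ctx->pwr_wait);
}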
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 4e47ed6bc716..682c573e20a7 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -114,7 +114,8 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
{
if (timing == MMC_TIMING_MMC_DDR52)
sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d96a057a7db8..1f424374bbbb 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
return clock / 256 / 16;
}
+static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
+{
+ u32 val;
+ ktime_t timeout;
+
+ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+
+ if (enable)
+ val |= ESDHC_CLOCK_SDCLKEN;
+ else
+ val &= ~ESDHC_CLOCK_SDCLKEN;
+
+ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ val = ESDHC_CLOCK_STABLE;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ udelay(10);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- if (clock == 0)
+ if (clock == 0) {
+ esdhc_clock_enable(host, false);
return;
+ }
/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
if (esdhc->vendor_ver < VENDOR_V_23)
@@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
-static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
-{
- u32 val;
- ktime_t timeout;
-
- val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-
- if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
- else
- val &= ~ESDHC_CLOCK_SDCLKEN;
-
- sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
- if (ktime_after(ktime_get(), timeout)) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- break;
- }
- udelay(10);
- }
-}
-
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
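esdhc_clock_enable(), moved above its first user in this hunk, polls a status bit against a ktime deadline rather than a fixed iteration count. A generic sketch of that bounded-polling idiom follows; the callback is a stand-in for the register read, and for MMIO registers the kernel also offers the readx_poll_timeout() helpers in <linux/iopoll.h>.

#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/types.h>

/* Poll @done(ctx) until it returns true or @ms milliseconds elapse. */
static bool demo_poll_timeout(bool (*done)(void *), void *ctx, unsigned int ms)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), ms);

	while (!done(ctx)) {
		if (ktime_after(ktime_get(), timeout))
			return false;	/* deadline exceeded, give up */
		udelay(10);
	}
	return true;
}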
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
new file mode 100644
index 000000000000..628bfe9a3d17
--- /dev/null
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -0,0 +1,607 @@
+/*
+ * SDHCI Controller driver for TI's OMAP SoCs
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDHCI_OMAP_CON 0x12c
+#define CON_DW8 BIT(5)
+#define CON_DMA_MASTER BIT(20)
+#define CON_INIT BIT(1)
+#define CON_OD BIT(0)
+
+#define SDHCI_OMAP_CMD 0x20c
+
+#define SDHCI_OMAP_HCTL 0x228
+#define HCTL_SDBP BIT(8)
+#define HCTL_SDVS_SHIFT 9
+#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
+
+#define SDHCI_OMAP_SYSCTL 0x22c
+#define SYSCTL_CEN BIT(2)
+#define SYSCTL_CLKD_SHIFT 6
+#define SYSCTL_CLKD_MASK 0x3ff
+
+#define SDHCI_OMAP_STAT 0x230
+
+#define SDHCI_OMAP_IE 0x234
+#define INT_CC_EN BIT(0)
+
+#define SDHCI_OMAP_AC12 0x23c
+#define AC12_V1V8_SIGEN BIT(19)
+
+#define SDHCI_OMAP_CAPA 0x240
+#define CAPA_VS33 BIT(24)
+#define CAPA_VS30 BIT(25)
+#define CAPA_VS18 BIT(26)
+
+#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
+
+#define SYSCTL_CLKD_MAX 0x3FF
+
+#define IOV_1V8 1800000 /* 1800000 uV */
+#define IOV_3V0 3000000 /* 3000000 uV */
+#define IOV_3V3 3300000 /* 3300000 uV */
+
+struct sdhci_omap_data {
+ u32 offset;
+};
+
+struct sdhci_omap_host {
+ void __iomem *base;
+ struct device *dev;
+ struct regulator *pbias;
+ bool pbias_enabled;
+ struct sdhci_host *host;
+ u8 bus_mode;
+ u8 power_mode;
+};
+
+static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
+ unsigned int offset)
+{
+ return readl(host->base + offset);
+}
+
+static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
+ unsigned int offset, u32 data)
+{
+ writel(data, host->base + offset);
+}
+
+static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
+ bool power_on, unsigned int iov)
+{
+ int ret;
+ struct device *dev = omap_host->dev;
+
+ if (IS_ERR(omap_host->pbias))
+ return 0;
+
+ if (power_on) {
+ ret = regulator_set_voltage(omap_host->pbias, iov, iov);
+ if (ret) {
+ dev_err(dev, "pbias set voltage failed\n");
+ return ret;
+ }
+
+ if (omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_enable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg enable fail\n");
+ return ret;
+ }
+
+ omap_host->pbias_enabled = true;
+ } else {
+ if (!omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_disable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ omap_host->pbias_enabled = false;
+ }
+
+ return 0;
+}
+
+static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
+ unsigned int iov)
+{
+ int ret;
+ struct sdhci_host *host = omap_host->host;
+ struct mmc_host *mmc = host->mmc;
+
+ ret = sdhci_omap_set_pbias(omap_host, false, 0);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
+ return ret;
+ }
+ }
+
+ ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
+ unsigned char signal_voltage)
+{
+ u32 reg;
+ ktime_t timeout;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ reg &= ~HCTL_SDVS_MASK;
+
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ reg |= HCTL_SDVS_33;
+ else
+ reg |= HCTL_SDVS_18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ reg |= HCTL_SDBP;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+}
+
+static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+ int ret;
+ unsigned int iov;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct device *dev;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ dev = omap_host->dev;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS33))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg &= ~AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_3V3;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS18))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg |= AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_1V8;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = sdhci_omap_enable_iov(omap_host, iov);
+ if (ret) {
+ dev_err(dev, "failed to switch IO voltage to %dmV\n", iov);
+ return ret;
+ }
+
+ dev_dbg(dev, "IO voltage switched to %dmV\n", iov);
+ return 0;
+}
+
+static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host,
+ unsigned int mode)
+{
+ u32 reg;
+
+ if (omap_host->bus_mode == mode)
+ return;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (mode == MMC_BUSMODE_OPENDRAIN)
+ reg |= CON_OD;
+ else
+ reg &= ~CON_OD;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ omap_host->bus_mode = mode;
+}
+
+static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_omap_set_bus_mode(omap_host, ios->bus_mode);
+ sdhci_set_ios(mmc, ios);
+}
+
+static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host,
+ unsigned int clock)
+{
+ u16 dsor;
+
+ dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock);
+ if (dsor > SYSCTL_CLKD_MAX)
+ dsor = SYSCTL_CLKD_MAX;
+
+ return dsor;
+}
+
+static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg |= SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg &= ~SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long clkdiv;
+
+ sdhci_omap_stop_clock(omap_host);
+
+ if (!clock)
+ return;
+
+ clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock);
+ clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT;
+ sdhci_enable_clk(host, clkdiv);
+
+ sdhci_omap_start_clock(omap_host);
+}
+
+static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+static int sdhci_omap_enable_dma(struct sdhci_host *host)
+{
+ u32 reg;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_DMA_MASTER;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ return 0;
+}
+
+static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX;
+}
+
+static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (width == MMC_BUS_WIDTH_8)
+ reg |= CON_DW8;
+ else
+ reg &= ~CON_DW8;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ sdhci_set_bus_width(host, width);
+}
+
+static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ u32 reg;
+ ktime_t timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->power_mode == power_mode)
+ return;
+
+ if (power_mode != MMC_POWER_ON)
+ return;
+
+ disable_irq(host->irq);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg &= ~CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN);
+
+ enable_irq(host->irq);
+
+ omap_host->power_mode = power_mode;
+}
+
+static struct sdhci_ops sdhci_omap_ops = {
+ .set_clock = sdhci_omap_set_clock,
+ .set_power = sdhci_omap_set_power,
+ .enable_dma = sdhci_omap_enable_dma,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = sdhci_omap_get_min_clock,
+ .set_bus_width = sdhci_omap_set_bus_width,
+ .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+ int ret = 0;
+ struct device *dev = omap_host->dev;
+ struct regulator *vqmmc;
+
+ vqmmc = regulator_get(dev, "vqmmc");
+ if (IS_ERR(vqmmc)) {
+ ret = PTR_ERR(vqmmc);
+ goto reg_put;
+ }
+
+ /* voltage capabilities might be set by the boot loader, clear them */
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
+
+ if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
+ reg |= CAPA_VS33;
+ if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ reg |= CAPA_VS18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
+
+reg_put:
+ regulator_put(vqmmc);
+
+ return ret;
+}
+
+static const struct sdhci_pltfm_data sdhci_omap_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V |
+ SDHCI_QUIRK2_ACMD23_BROKEN |
+ SDHCI_QUIRK2_RSP_136_HAS_CRC,
+ .ops = &sdhci_omap_ops,
+};
+
+static const struct sdhci_omap_data dra7_data = {
+ .offset = 0x200,
+};
+
+static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sdhci_match);
+
+static int sdhci_omap_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 offset;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct mmc_host *mmc;
+ const struct of_device_id *match;
+ struct sdhci_omap_data *data;
+
+ match = of_match_device(omap_sdhci_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct sdhci_omap_data *)match->data;
+ if (!data) {
+ dev_err(dev, "no sdhci omap data\n");
+ return -EINVAL;
+ }
+ offset = data->offset;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
+ sizeof(*omap_host));
+ if (IS_ERR(host)) {
+ dev_err(dev, "Failed sdhci_pltfm_init\n");
+ return PTR_ERR(host);
+ }
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ omap_host->host = host;
+ omap_host->base = host->ioaddr;
+ omap_host->dev = dev;
+ host->ioaddr += offset;
+
+ mmc = host->mmc;
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_pltfm_free;
+
+ pltfm_host->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err_pltfm_free;
+ }
+
+ ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
+ if (ret) {
+ dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
+ goto err_pltfm_free;
+ }
+
+ omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
+ if (IS_ERR(omap_host->pbias)) {
+ ret = PTR_ERR(omap_host->pbias);
+ if (ret != -ENODEV)
+ goto err_pltfm_free;
+ dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
+ }
+ omap_host->pbias_enabled = false;
+
+ /*
+ * omap_device_pm_domain has callbacks to enable the main
+ * functional clock, interface clock and also configure the
+ * SYSCONFIG register of omap devices. The callback will be invoked
+ * as part of pm_runtime_get_sync.
+ */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_put_noidle(dev);
+ goto err_rpm_disable;
+ }
+
+ ret = sdhci_omap_set_capabilities(omap_host);
+ if (ret) {
+ dev_err(dev, "failed to set system capabilities\n");
+ goto err_put_sync;
+ }
+
+ host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_omap_start_signal_voltage_switch;
+ host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
+
+ sdhci_read_caps(host);
+ host->caps |= SDHCI_CAN_DO_ADMA2;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_put_sync;
+
+ return 0;
+
+err_put_sync:
+ pm_runtime_put_sync(dev);
+
+err_rpm_disable:
+ pm_runtime_disable(dev);
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_omap_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, true);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_omap_driver = {
+ .probe = sdhci_omap_probe,
+ .remove = sdhci_omap_remove,
+ .driver = {
+ .name = "sdhci-omap",
+ .of_match_table = omap_sdhci_match,
+ },
+};
+
+module_platform_driver(sdhci_omap_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci_omap");
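The new sdhci-omap driver picks its register offset from the .data pointer of the matched of_device_id entry. A condensed sketch of that lookup pattern; "vendor,demo-sdhci" and the demo_* names are placeholders:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct demo_data {
	u32 offset;			/* SoC-specific register offset */
};

static const struct demo_data demo_dra7_data = {
	.offset = 0x200,
};

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-sdhci", .data = &demo_dra7_data },
	{ /* sentinel */ },
};

static int demo_probe(struct platform_device *pdev)
{
	/* of_device_get_match_data() returns .data of the matched entry */
	const struct demo_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;

	dev_info(&pdev->dev, "register offset 0x%x\n", data->offset);
	return 0;
}

The probe in the patch reaches the same data through of_match_device() and a cast; of_device_get_match_data() is simply the more compact equivalent.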
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 67d787fa3306..3e4f04fd5175 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,7 +32,6 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
@@ -798,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
-/* O2Micro extra registers */
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1290,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
+ SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 14273ca00641..555970a29c94 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -19,7 +19,40 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
deleted file mode 100644
index 770f53857211..000000000000
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2013 BayHub Technology Ltd.
- *
- * Authors: Peter Guo <peter.guo@bayhubtech.com>
- * Adam Lee <adam.lee@canonical.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __SDHCI_PCI_O2MICRO_H
-#define __SDHCI_PCI_O2MICRO_H
-
-#include "sdhci-pci.h"
-
-/*
- * O2Micro device IDs
- */
-
-#define PCI_DEVICE_ID_O2_SDS0 0x8420
-#define PCI_DEVICE_ID_O2_SDS1 0x8421
-#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
-#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
-#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
-
-/*
- * O2Micro device registers
- */
-
-#define O2_SD_MISC_REG5 0x64
-#define O2_SD_LD0_CTRL 0x68
-#define O2_SD_DEV_CTRL 0x88
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_TEST_REG 0xD4
-#define O2_SD_FUNC_REG0 0xDC
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-#define O2_SD_MISC_CTRL4 0xFC
-#define O2_SD_TUNING_CTRL 0x300
-#define O2_SD_PLL_SETTING 0x304
-#define O2_SD_CLK_SETTING 0x328
-#define O2_SD_CAP_REG2 0x330
-#define O2_SD_CAP_REG0 0x334
-#define O2_SD_UHS1_CAP_SETTING 0x33C
-#define O2_SD_DELAY_CTRL 0x350
-#define O2_SD_UHS2_L1_CTRL 0x35C
-#define O2_SD_FUNC_REG3 0x3E0
-#define O2_SD_FUNC_REG4 0x3E4
-#define O2_SD_LED_ENABLE BIT(6)
-#define O2_SD_FREG0_LEDOFF BIT(13)
-#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
-
-#define O2_SD_VENDOR_SETTING 0x110
-#define O2_SD_VENDOR_SETTING2 0x1C8
-
-extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-
-extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-
-extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-
-#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 3e8ea3e566f6..0056f08a29cc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -6,6 +6,12 @@
* PCI device IDs, sub IDs
*/
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
@@ -26,6 +32,7 @@
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
+#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
@@ -164,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+#ifdef CONFIG_PM_SLEEP
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+#endif
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index d328fcf284d1..cda83ccb2702 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -761,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = {
NULL)
};
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
-static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
- .no_divider = true,
-};
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
-#else
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
-#endif
-
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
- }, {
- .name = "exynos4-sdhci",
- .driver_data = EXYNOS4_SDHCI_DRV_DATA,
},
{ }
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+ .no_divider = true,
+};
+
static const struct of_device_id sdhci_s3c_dt_match[] = {
{ .compatible = "samsung,s3c6410-sdhci", },
{ .compatible = "samsung,exynos4210-sdhci",
- .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ .data = &exynos4_sdhci_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0cd6fa80db66..b877c13184c2 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -422,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ /* SDHCI controllers on Tegra186 support 40-bit addressing,
+ * while IOVA addresses are 48-bit wide on Tegra186. With a
+ * 64-bit DMA mask used for SDHCI, accesses can be broken.
+ * Disable 64-bit DMA so that the driver falls back to a
+ * 32-bit DMA mask. Ideally a 40-bit DMA mask would work,
+ * but it is not supported as of now.
+ */
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.ops = &tegra114_sdhci_ops,
};
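The Tegra186 comment above works around a 40-bit addressing limit by disabling 64-bit DMA through a quirk flag. For reference only (this is not what the patch does), the generic way a driver expresses such a limit is a dma_set_mask_and_coherent() call with a fallback:

#include <linux/dma-mapping.h>

static int demo_set_dma_mask(struct device *dev)
{
	/* Prefer the 40-bit mask the hardware can actually address ... */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)))
		return 0;

	/* ... and fall back to 32 bits if that is rejected. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}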
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0d5fcca18c9e..2f14334e42df 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2407,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param)
;
}
-static void sdhci_timeout_timer(unsigned long data)
+static void sdhci_timeout_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)data;
+ host = from_timer(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
@@ -2429,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_timeout_data_timer(unsigned long data)
+static void sdhci_timeout_data_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host *)data;
+ host = from_timer(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3238,7 +3238,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* available.
*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
DBG("Version: 0x%08x | Present: 0x%08x\n",
@@ -3749,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host)
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
- setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
- setup_timer(&host->data_timer, sdhci_timeout_data_timer,
- (unsigned long)host);
+ timer_setup(&host->timer, sdhci_timeout_timer, 0);
+ timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
init_waitqueue_head(&host->buf_ready_int);
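This hunk, and the tifm_sd, via-sdmmc, vub300 and wbsd hunks further down, are the same timer-API conversion: timer_setup() replaces init_timer()/setup_timer(), and the callback now takes the timer pointer and recovers its container with from_timer(). A minimal sketch of the converted form, with illustrative demo_* names:

#include <linux/timer.h>

struct demo_host {
	struct timer_list timer;
	int state;
};

static void demo_timeout(struct timer_list *t)
{
	/* from_timer() maps the timer back to its enclosing structure */
	struct demo_host *host = from_timer(host, t, timer);

	host->state = -1;
}

static void demo_init(struct demo_host *host)
{
	/* replaces init_timer() plus .data/.function assignments */
	timer_setup(&host->timer, demo_timeout, 0);
}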
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 111b66f5439b..04ca0d33a521 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include "sdhci-pltfm.h"
@@ -47,6 +48,7 @@ struct f_sdhost_priv {
struct clk *clk;
u32 vendor_hs200;
struct device *dev;
+ bool enable_cmd_dat_delay;
};
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
@@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
sdhci_reset(host, mask);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
@@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
SDHCI_QUIRK2_TUNING_WORK_AROUND;
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 53c970fe0873..cc98355dbdb9 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1175,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return -EINVAL;
ret = mmc_regulator_get_supply(host->mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get vmmc supply\n");
+ if (ret)
return ret;
- }
host->reg_base = devm_ioremap_resource(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 93c4b40df90a..a3d8380ab480 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data)
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(unsigned long data)
+static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = from_timer(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
@@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
(unsigned long)host);
- setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
+ timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9c4e6199b854..583bf3262df5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -167,11 +167,11 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ usleep_range(10000, 11000);
}
}
@@ -179,7 +179,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -187,7 +187,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
@@ -219,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
tmio_mmc_clk_start(host);
}
@@ -230,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
@@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
+ int err;
- mmc_regulator_get_supply(mmc);
+ err = mmc_regulator_get_supply(mmc);
+ if (err)
+ return err;
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
@@ -1299,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
pm_runtime_enable(&pdev->dev);
ret = mmc_add_host(mmc);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
+
mmc_gpiod_request_cd_irq(mmc);
}
return 0;
+
+remove_host:
+ tmio_mmc_host_remove(_host);
+ return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 64da6a88cfb9..cdfeb15b6f05 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto e_free_mmc;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a838bf5480d8..32c4211506fc 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -932,12 +932,12 @@ out:
return result;
}
-static void via_sdc_timeout(unsigned long ulongdata)
+static void via_sdc_timeout(struct timer_list *t)
{
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = (struct via_crdr_mmc_host *)ulongdata;
+ sdhost = from_timer(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
@@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
u32 lenreg;
u32 status;
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = via_sdc_timeout;
+ timer_setup(&host->timer, via_sdc_timeout, 0);
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 8f569d257405..1fe68137a30f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_inactivity_timer_expired(unsigned long data)
+static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300)
* timer callback runs in atomic mode
* so it cannot call usb_kill_urb()
*/
-static void vub300_sg_timed_out(unsigned long data)
+static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
@@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
sizeof(vub300->vub_name));
return;
}
+
+ return;
+
+copy_error_message:
+ strncpy(vub300->vub_name, "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
}
/*
@@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface,
INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
kref_init(&vub300->kref);
- init_timer(&vub300->sg_transfer_timer);
- vub300->sg_transfer_timer.data = (unsigned long)vub300;
- vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+ timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
kref_get(&vub300->kref);
- init_timer(&vub300->inactivity_timer);
- vub300->inactivity_timer.data = (unsigned long)vub300;
- vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+ timer_setup(&vub300->inactivity_timer,
+ vub300_inactivity_timer_expired, 0);
vub300->inactivity_timer.expires = jiffies + HZ;
add_timer(&vub300->inactivity_timer);
if (vub300->card_present)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 546aaf8d1507..f4233576153b 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -956,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = {
* Helper function to reset detection ignore
*/
-static void wbsd_reset_ignore(unsigned long data)
+static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = (struct wbsd_host *)data;
+ struct wbsd_host *host = from_timer(host, t, ignore_timer);
BUG_ON(host == NULL);
@@ -1224,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev)
/*
* Set up timers
*/
- init_timer(&host->ignore_timer);
- host->ignore_timer.data = (unsigned long)host;
- host->ignore_timer.function = wbsd_reset_ignore;
+ timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0);
/*
* Maximum number of segments. Worst case is one sector per segment
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c99dc59d729b..b2db581131b2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2042,6 +2042,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
+ slave->link_new_state = slave->link;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -3253,7 +3254,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash;
+ return hash >> 1;
}
/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c18947189..d065c0e2d18e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 46a746ee80bb..b5145a7f874c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 4d1fe8d95042..2772d05ff11c 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
- tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
- writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
- priv->base + IFI_CANFD_TDELAY);
+ tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+ tdc &= IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
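The change above derives the transmitter delay compensation from the whole data-phase interval before the sample point (propagation segment plus phase segment 1), scaled by the data bitrate prescaler, instead of from phase_seg1 + 1. A small numeric illustration with hypothetical bit-timing values (not taken from any real configuration):

    /* Illustration only: hypothetical CAN FD data-phase bit timing. */
    static unsigned int demo_tdc_offset(void)
    {
            unsigned int brp = 2;           /* data bitrate prescaler     */
            unsigned int prop_seg = 3;      /* propagation segment, in tq */
            unsigned int phase_seg1 = 4;    /* phase segment 1, in tq     */

            /* clock cycles from the start of the bit to the sample point */
            return brp * (prop_seg + phase_seg1);   /* 2 * (3 + 4) = 14 */
    }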
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 51c2d182a33a..b4efd711f824 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -29,14 +29,19 @@
#include "peak_canfd_user.h"
MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
MODULE_LICENSE("GPL v2");
#define PCIEFD_DRV_NAME "peak_pciefd"
#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */
+#define PCAN_M2_ID 0x001a /* for M2 slot cards */
/* PEAK PCIe board access description */
#define PCIEFD_BAR0_SIZE (64 * 1024)
@@ -203,6 +208,11 @@ struct pciefd_board {
/* supported device ids. */
static const struct pci_device_id peak_pciefd_tbl[] = {
{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
{0,}
};
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b0c80859f746..1ac2090a1721 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode,
+ * ignoring any timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index f2d623a7aee0..123e2c1b65f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2D
+#define T4FW_VERSION_MICRO 0x3F
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2D
+#define T5FW_VERSION_MICRO 0x3F
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MICRO 0x3F
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a37af5813f33..fcf9ba5eb8d1 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6747,6 +6747,9 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+
err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
if (err)
goto err;
@@ -6776,6 +6779,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
free_irq(qv->irq, qv);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index fc281712869b..17b723218b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -93,7 +93,7 @@ static void delayed_event_release(struct mlx5_device_context *dev_ctx,
list_splice_init(&priv->waiting_events_list, &temp);
if (!dev_ctx->context)
goto out;
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+ list_for_each_entry_safe(de, n, &temp, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index cc13d3dbd366..13b5ef9d8703 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -67,7 +67,7 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 850cdc980ab5..4837045ffba3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -365,21 +365,24 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
+ u8 mac_addr[ETH_ALEN];
int l2_err = 0;
+ ether_addr_copy(mac_addr, hn->ai.addr);
+
switch (action) {
case MLX5E_ACTION_ADD:
mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
- if (!is_multicast_ether_addr(hn->ai.addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr);
+ if (!is_multicast_ether_addr(mac_addr)) {
+ l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
break;
case MLX5E_ACTION_DEL:
- if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr);
+ if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
+ l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
mlx5e_del_l2_flow_rule(priv, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
@@ -387,7 +390,7 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
if (l2_err)
netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err);
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
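The en_fs.c hunk snapshots the MAC address into a local buffer because the DEL branch frees the hash node (mlx5e_del_l2_from_hash()) while the address is still needed for the warning printed afterwards. The shape of that fix, sketched with hypothetical names:

    #include <linux/etherdevice.h>
    #include <linux/slab.h>

    /* Hypothetical hash-node type; only the ordering matters here. */
    struct demo_l2_node {
            unsigned char addr[ETH_ALEN];
    };

    static void demo_del_node(struct demo_l2_node *node)
    {
            unsigned char mac[ETH_ALEN];

            ether_addr_copy(mac, node->addr);       /* copy before the free  */
            kfree(node);                            /* node->addr now invalid */
            pr_warn("removed %pM\n", mac);          /* safe: local copy       */
    }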
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15a1687483cc..91b1b0938931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -215,22 +215,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- struct page *page;
-
if (mlx5e_rx_cache_get(rq, dma_info))
return 0;
- page = dev_alloc_pages(rq->buff.page_order);
- if (unlikely(!page))
+ dma_info->page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!dma_info->page))
return -ENOMEM;
- dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- put_page(page);
+ put_page(dma_info->page);
+ dma_info->page = NULL;
return -ENOMEM;
}
- dma_info->page = page;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e906b754415c..ab92298eafc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,7 +49,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
bool busy = false;
- int work_done;
+ int work_done = 0;
int i;
for (i = 0; i < c->num_tc; i++)
@@ -58,15 +58,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
- work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
- busy |= work_done == budget;
+ if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+ work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+ busy |= work_done == budget;
+ }
busy |= c->rq.post_wqes(&c->rq);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
- if (work_done == budget)
+ if (budget && work_done == budget)
work_done--;
}
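The en_txrx.c change follows the NAPI convention that a zero budget (as used by netpoll) means "service TX completions only and never touch the RX rings". A minimal poll callback honouring that convention; the demo_clean_tx()/demo_clean_rx() helpers are hypothetical:

    #include <linux/netdevice.h>

    static void demo_clean_tx(struct napi_struct *napi);
    static int demo_clean_rx(struct napi_struct *napi, int budget);

    static int demo_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            demo_clean_tx(napi);                    /* always allowed       */

            if (budget)                             /* budget == 0: skip RX */
                    work_done = demo_clean_rx(napi, budget);

            if (work_done < budget)
                    napi_complete_done(napi, work_done);

            return work_done;
    }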
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0d2c8dcd6eae..06562c9a6b9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1482,9 +1482,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
return -EAGAIN;
}
+ /* The panic teardown FW command will stop the PCI bus communication
+ * with the HCA, so the health poll is no longer needed.
+ */
+ mlx5_drain_health_wq(dev);
+ mlx5_stop_health_poll(dev);
+
ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) {
mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ mlx5_start_health_poll(dev);
return ret;
}
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b2ff88e69a81..3d4f7959dabb 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv;
- if (priv->suspend)
+ if (priv && priv->suspend)
priv->suspend(dev);
return usbnet_suspend(intf, message);
@@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv;
- if (priv->resume)
+ if (priv && priv->resume)
priv->resume(dev);
return usbnet_resume(intf);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3e7a3ac3a362..05dca3e5c93d 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -230,7 +230,7 @@ skip:
goto bad_desc;
}
- if (header.usb_cdc_ether_desc) {
+ if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
/* because of Zaurus, we may be ignoring the host
* side link address we were given.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8c3733608271..8d4a6f7cba61 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
if (rawip) {
+ skb_reset_mac_header(skb);
skb->dev = dev->net; /* normally set by eth_type_trans */
skb->protocol = proto;
return 1;
@@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* errors aren't fatal - we can live with the dynamic address */
- if (cdc_ether) {
+ if (cdc_ether && cdc_ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 0fd6195601ba..96cd55f9e3c5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -244,7 +244,7 @@ config REGULATOR_DA9210
interface.
config REGULATOR_DA9211
- tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
+ tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator"
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 376a99b7cf5d..181622b2813d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -244,6 +244,7 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
.ops = &axp20x_ops_sw,
};
+/* DCDC ranges shared with AXP813 */
static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000),
REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000),
@@ -426,6 +427,69 @@ static const struct regulator_desc axp809_regulators[] = {
AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)),
};
+static const struct regulator_desc axp813_regulators[] = {
+ AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
+ AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(1)),
+ AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(2)),
+ AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
+ 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(4)),
+ AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
+ 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(5)),
+ AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7", axp803_dcdc6_ranges,
+ 72, AXP813_DCDC7_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(6)),
+ AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+ AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+ AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
+ AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
+ 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
+ BIT(4)),
+ AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ /* to do / check ... */
+ AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
+ AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+ AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
+ AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+ /*
+ * TODO: FLDO3 = {DCDC5, FLDOIN} / 2
+ *
+ * This means FLDO3 effectively switches supplies at runtime,
+ * something the regulator subsystem does not support.
+ */
+ AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800),
+ AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_SW(AXP813, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(7)),
+};
+
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -441,9 +505,10 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
step = 75;
break;
case AXP803_ID:
+ case AXP813_ID:
/*
- * AXP803 DCDC work frequency setting has the same range and
- * step as AXP22X, but at a different register.
+ * The AXP803/AXP813 DCDC work frequency setting has the same
+ * range and step as on the AXP22X, but lives in a different register.
* Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
@@ -561,6 +626,14 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
workmode <<= id - AXP803_DCDC1;
break;
+ case AXP813_ID:
+ if (id < AXP813_DCDC1 || id > AXP813_DCDC7)
+ return -EINVAL;
+
+ mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP813_DCDC1);
+ workmode <<= id - AXP813_DCDC1;
+ break;
+
default:
/* should not happen */
WARN_ON(1);
@@ -579,11 +652,12 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
u32 reg = 0;
/*
- * Currently in our supported AXP variants, only AXP803 and AXP806
- * have polyphase regulators.
+ * Currently in our supported AXP variants, only AXP803, AXP806,
+ * and AXP813 have polyphase regulators.
*/
switch (axp20x->variant) {
case AXP803_ID:
+ case AXP813_ID:
regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, &reg);
switch (id) {
@@ -656,6 +730,12 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
regulators = axp809_regulators;
nregulators = AXP809_REG_ID_MAX;
break;
+ case AXP813_ID:
+ regulators = axp813_regulators;
+ nregulators = AXP813_REG_ID_MAX;
+ drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
+ "x-powers,drive-vbus-en");
+ break;
default:
dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
axp20x->variant);
@@ -677,6 +757,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
if (axp20x_is_polyphase_slave(axp20x, i))
continue;
+ /* Support for AXP813's FLDO3 is not implemented */
+ if (axp20x->variant == AXP813_ID && i == AXP813_FLDO3)
+ continue;
+
/*
* Regulators DC1SW and DC5LDO are connected internally,
* so we have to handle their supply names separately.
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index aa47280efd32..9b8f47617724 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
/*
* da9211-regulator.c - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -496,8 +496,11 @@ static const struct i2c_device_id da9211_i2c_id[] = {
{"da9211", DA9211},
{"da9212", DA9212},
{"da9213", DA9213},
+ {"da9223", DA9223},
{"da9214", DA9214},
+ {"da9224", DA9224},
{"da9215", DA9215},
+ {"da9225", DA9225},
{},
};
MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -507,8 +510,11 @@ static const struct of_device_id da9211_dt_ids[] = {
{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
{ .compatible = "dlg,da9212", .data = &da9211_i2c_id[1] },
{ .compatible = "dlg,da9213", .data = &da9211_i2c_id[2] },
- { .compatible = "dlg,da9214", .data = &da9211_i2c_id[3] },
- { .compatible = "dlg,da9215", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9223", .data = &da9211_i2c_id[3] },
+ { .compatible = "dlg,da9214", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9224", .data = &da9211_i2c_id[5] },
+ { .compatible = "dlg,da9215", .data = &da9211_i2c_id[6] },
+ { .compatible = "dlg,da9225", .data = &da9211_i2c_id[7] },
{},
};
MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -526,5 +532,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9214/DA9215 regulator driver");
+MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index b841bbf330cc..2cb32aab4f82 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,6 +1,6 @@
/*
* da9211-regulator.h - Regulator definitions for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 0cb76ba29e84..8f782d22fdbe 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -34,6 +34,8 @@ struct pbias_reg_info {
u32 vmode;
unsigned int enable_time;
char *name;
+ const unsigned int *pbias_volt_table;
+ int n_voltages;
};
struct pbias_regulator_data {
@@ -49,11 +51,16 @@ struct pbias_of_data {
unsigned int offset;
};
-static const unsigned int pbias_volt_table[] = {
+static const unsigned int pbias_volt_table_3_0V[] = {
1800000,
3000000
};
+static const unsigned int pbias_volt_table_3_3V[] = {
+ 1800000,
+ 3300000
+};
+
static const struct regulator_ops pbias_regulator_voltage_ops = {
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -69,6 +76,8 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
.vmode = BIT(0),
.disable_val = 0,
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap2430"
};
@@ -77,6 +86,8 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
.enable_mask = BIT(9),
.vmode = BIT(8),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_sim_omap3"
};
@@ -86,6 +97,8 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap4"
};
@@ -95,6 +108,8 @@ static const struct pbias_reg_info pbias_mmc_omap5 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_3V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap5"
};
@@ -199,8 +214,8 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.owner = THIS_MODULE;
drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
- drvdata[data_idx].desc.volt_table = pbias_volt_table;
- drvdata[data_idx].desc.n_voltages = 2;
+ drvdata[data_idx].desc.volt_table = info->pbias_volt_table;
+ drvdata[data_idx].desc.n_voltages = info->n_voltages;
drvdata[data_idx].desc.enable_time = info->enable_time;
drvdata[data_idx].desc.vsel_reg = offset;
drvdata[data_idx].desc.vsel_mask = info->vmode;
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 16c5f84e06a7..0241ada47d04 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -593,13 +593,20 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
u8 *voltage_sel)
{
const struct spmi_voltage_range *range, *end;
+ unsigned offset;
range = vreg->set_points->range;
end = range + vreg->set_points->count;
for (; range < end; range++) {
if (selector < range->n_voltages) {
- *voltage_sel = selector;
+ /*
+ * hardware selectors between set point min and real
+ * min are invalid so we ignore them
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ *voltage_sel = selector + offset;
*range_sel = range->range_sel;
return 0;
}
@@ -613,15 +620,35 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
const struct spmi_voltage_range *range)
{
- int sw_sel = hw_sel;
+ unsigned sw_sel = 0;
+ unsigned offset, max_hw_sel;
const struct spmi_voltage_range *r = vreg->set_points->range;
-
- while (r != range) {
+ const struct spmi_voltage_range *end = r + vreg->set_points->count;
+
+ for (; r < end; r++) {
+ if (r == range && range->n_voltages) {
+ /*
+ * hardware selectors between set point min and real
+ * min and between set point max and real max are
+ * invalid so we return an error if they're
+ * programmed into the hardware
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ if (hw_sel < offset)
+ return -EINVAL;
+
+ max_hw_sel = range->set_point_max_uV - range->min_uV;
+ max_hw_sel /= range->step_uV;
+ if (hw_sel > max_hw_sel)
+ return -EINVAL;
+
+ return sw_sel + hw_sel - offset;
+ }
sw_sel += r->n_voltages;
- r++;
}
- return sw_sel;
+ return -EINVAL;
}
static const struct spmi_voltage_range *
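The two functions above shift software selectors by the gap between a range's real minimum and its usable set-point minimum. A worked example with hypothetical range values: min_uV = 1500000, set_point_min_uV = 1700000 and step_uV = 50000 give an offset of (1700000 - 1500000) / 50000 = 4, so software selector 0 programs hardware selector 4, and hardware selectors 0..3 are rejected on readback.

    /* Illustration only: hypothetical range values, not from any PMIC. */
    static unsigned int demo_sw_to_hw(unsigned int selector)
    {
            unsigned int min_uV = 1500000;
            unsigned int set_point_min_uV = 1700000;
            unsigned int step_uV = 50000;
            unsigned int offset = (set_point_min_uV - min_uV) / step_uV;   /* 4 */

            return selector + offset;   /* sw 0 -> hw 4, sw 1 -> hw 5, ... */
    }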
@@ -1619,11 +1646,20 @@ static const struct spmi_regulator_data pm8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pmi8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "l1", 0x4000, "vdd_l1", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
+ { .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 9aafbb03482d..bc489958fed7 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -154,7 +154,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (!tps->strobes[rid]) {
if (rid == TPS65218_DCDC_3)
- tps->info[rid]->strobe = 3;
+ tps->strobes[rid] = 3;
else
return -EINVAL;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ad3ea24f0885..bcc1694cebcd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2685,7 +2685,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
}
sdev->sdev_state = state;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
return 0;
illegal:
@@ -3109,7 +3108,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
case SDEV_BLOCK:
case SDEV_TRANSPORT_OFFLINE:
sdev->sdev_state = new_state;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break;
case SDEV_CREATED_BLOCK:
if (new_state == SDEV_TRANSPORT_OFFLINE ||
@@ -3117,7 +3115,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
sdev->sdev_state = new_state;
else
sdev->sdev_state = SDEV_CREATED;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break;
case SDEV_CANCEL:
case SDEV_OFFLINE:
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 4f6f01cf9968..36f6190931bc 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -556,11 +556,8 @@ int srp_reconnect_rport(struct srp_rport *rport)
*/
shost_for_each_device(sdev, shost) {
mutex_lock(&sdev->state_mutex);
- if (sdev->sdev_state == SDEV_OFFLINE) {
+ if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
- sysfs_notify(&sdev->sdev_gendev.kobj,
- NULL, "state");
- }
mutex_unlock(&sdev->state_mutex);
}
} else if (rport->state == SRP_RPORT_RUNNING) {
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a75f2a2cf780..603783976b81 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1,10 +1,6 @@
#
# SPI driver configuration
#
-# NOTE: the reason this doesn't show SPI slave support is mostly that
-# nobody's needed a slave side API yet. The master-role API is not
-# fully appropriate there, so it'd need some thought to do well.
-#
menuconfig SPI
bool "SPI support"
depends on HAS_IOMEM
@@ -379,7 +375,7 @@ config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
depends on HAS_DMA
- depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -626,6 +622,13 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SPRD_ADI
+ tristate "Spreadtrum ADI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HWSPINLOCK || (COMPILE_TEST && !HWSPINLOCK)
+ help
+ ADI driver based on SPI for Spreadtrum SoCs.
+
config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8e0cda73b324..34c5f2832ddf 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 568e1c65aa82..77fe55ce790c 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -213,7 +213,7 @@ static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
}
static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
- unsigned int speed_hz, u16 mode)
+ unsigned int speed_hz)
{
u32 val;
u32 prescale;
@@ -231,17 +231,6 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
val |= A3700_SPI_CLK_CAPT_EDGE;
spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
}
-
- val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
-
- if (mode & SPI_CPOL)
- val |= A3700_SPI_CLK_POL;
-
- if (mode & SPI_CPHA)
- val |= A3700_SPI_CLK_PHA;
-
- spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
@@ -423,7 +412,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
a3700_spi = spi_master_get_devdata(spi->master);
- a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
byte_len = xfer->bits_per_word >> 3;
@@ -584,6 +573,8 @@ static int a3700_spi_prepare_message(struct spi_master *master,
a3700_spi_bytelen_set(a3700_spi, 4);
+ a3700_spi_mode_set(a3700_spi, spi->mode);
+
return 0;
}
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 6ab4c7700228..68cfc351b47f 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@ err_put_master:
static int spi_engine_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct spi_engine *spi_engine = spi_master_get_devdata(master);
int irq = platform_get_irq(pdev, 0);
@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
free_irq(irq, master);
+ spi_master_put(master);
+
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d89127f4a46d..f652f70cb8db 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -32,6 +32,7 @@
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>
@@ -151,6 +152,11 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+static const struct fsl_dspi_devtype_data coldfire_data = {
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
+};
+
struct fsl_dspi_dma {
/* Length of transfer in words of DSPI_FIFO_SIZE */
u32 curr_xfer_len;
@@ -741,6 +747,7 @@ static int dspi_setup(struct spi_device *spi)
{
struct chip_data *chip;
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ struct fsl_dspi_platform_data *pdata;
u32 cs_sck_delay = 0, sck_cs_delay = 0;
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
unsigned char pasc = 0, asc = 0, fmsz = 0;
@@ -761,11 +768,18 @@ static int dspi_setup(struct spi_device *spi)
return -ENOMEM;
}
- of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
+ pdata = dev_get_platdata(&dspi->pdev->dev);
- of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ if (!pdata) {
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+ } else {
+ cs_sck_delay = pdata->cs_sck_delay;
+ sck_cs_delay = pdata->sck_cs_delay;
+ }
chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
@@ -949,6 +963,7 @@ static int dspi_probe(struct platform_device *pdev)
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
+ struct fsl_dspi_platform_data *pdata;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -969,25 +984,34 @@ static int dspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
- ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
- goto out_master_put;
- }
- master->num_chipselect = cs_num;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ master->num_chipselect = pdata->cs_num;
+ master->bus_num = pdata->bus_num;
- ret = of_property_read_u32(np, "bus-num", &bus_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_master_put;
- }
- master->bus_num = bus_num;
+ dspi->devtype_data = &coldfire_data;
+ } else {
- dspi->devtype_data = of_device_get_match_data(&pdev->dev);
- if (!dspi->devtype_data) {
- dev_err(&pdev->dev, "can't get devtype_data\n");
- ret = -EFAULT;
- goto out_master_put;
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ dspi->devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!dspi->devtype_data) {
+ dev_err(&pdev->dev, "can't get devtype_data\n");
+ ret = -EFAULT;
+ goto out_master_put;
+ }
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
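The probe and setup changes above let DT-less platforms (the ColdFire M5441x case added to Kconfig) hand the controller its configuration through struct fsl_dspi_platform_data from <linux/spi/spi-fsl-dspi.h>. A hedged sketch of board code filling in the fields the driver reads (cs_num, bus_num, cs_sck_delay, sck_cs_delay); the device name and the delay values are assumptions, not taken from the patch:

    #include <linux/platform_device.h>
    #include <linux/spi/spi-fsl-dspi.h>

    static struct fsl_dspi_platform_data demo_dspi_pdata = {
            .bus_num      = 0,
            .cs_num       = 4,
            .cs_sck_delay = 100,    /* assumed to be in ns */
            .sck_cs_delay = 100,    /* assumed to be in ns */
    };

    static struct platform_device demo_dspi_device = {
            .name = "fsl-dspi",     /* assumed platform-device name */
            .id   = 0,
            .dev  = {
                    .platform_data = &demo_dspi_pdata,
            },
    };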
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index babb15f07995..79ddefe4180d 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -53,10 +53,13 @@
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+#define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
+/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
+#define MX53_MAX_TRANSFER_BYTES 512
enum spi_imx_devtype {
IMX1_CSPI,
@@ -76,7 +79,9 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
+ void (*disable)(struct spi_imx_data *);
bool has_dmamode;
+ bool has_slavemode;
unsigned int fifo_size;
bool dynamic_burst;
enum spi_imx_devtype devtype;
@@ -108,6 +113,11 @@ struct spi_imx_data {
unsigned int dynamic_burst, read_u32;
unsigned int word_mask;
+ /* Slave mode */
+ bool slave_mode;
+ bool slave_aborted;
+ unsigned int slave_burst;
+
/* DMA */
bool usedma;
u32 wml;
@@ -221,6 +231,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (!master->dma_rx)
return false;
+ if (spi_imx->slave_mode)
+ return false;
+
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
@@ -262,6 +275,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT_RDREN (1 << 4)
#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
@@ -378,6 +392,44 @@ static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
spi_imx_buf_tx_u16(spi_imx);
}
+static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
+
+ if (spi_imx->rx_buf) {
+ int n_bytes = spi_imx->slave_burst % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ memcpy(spi_imx->rx_buf,
+ ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
+
+ spi_imx->rx_buf += n_bytes;
+ spi_imx->slave_burst -= n_bytes;
+ }
+}
+
+static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = 0;
+ int n_bytes = spi_imx->count % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ if (spi_imx->tx_buf) {
+ memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
+ spi_imx->tx_buf, n_bytes);
+ val = cpu_to_be32(val);
+ spi_imx->tx_buf += n_bytes;
+ }
+
+ spi_imx->count -= n_bytes;
+
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int fspi, unsigned int *fres)
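mx53_ecspi_tx_slave()/mx53_ecspi_rx_slave() above move data through the FIFO in 32-bit words, with the odd-sized tail handled first: n_bytes is the remaining count modulo 4 (or 4 when the remainder is zero) and the bytes are packed into the tail of the word before the big-endian conversion. A small sketch of the TX-side packing, assuming a hypothetical 6-byte transfer and omitting the register write:

    #include <stdint.h>
    #include <string.h>

    /* Illustration only: split 'count' bytes into 32-bit FIFO words.
     * For count = 6 the loop emits a 2-byte word first, then a full
     * 4-byte word, mirroring the n_bytes = count % sizeof(val) logic.
     */
    static void demo_pack_words(const unsigned char *buf, unsigned int count)
    {
            while (count) {
                    uint32_t val = 0;
                    unsigned int n = count % sizeof(val) ? count % sizeof(val)
                                                         : sizeof(val);

                    memcpy((unsigned char *)&val + sizeof(val) - n, buf, n);
                    /* the driver then writes cpu_to_be32(val) to the TX FIFO */
                    buf += n;
                    count -= n;
            }
    }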
@@ -427,6 +479,9 @@ static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
if (enable & MXC_INT_RR)
val |= MX51_ECSPI_INT_RREN;
+ if (enable & MXC_INT_RDR)
+ val |= MX51_ECSPI_INT_RDREN;
+
writel(val, spi_imx->base + MX51_ECSPI_INT);
}
@@ -439,6 +494,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
+static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
static int mx51_ecspi_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
@@ -446,14 +510,11 @@ static int mx51_ecspi_config(struct spi_device *spi)
u32 clk = spi_imx->speed_hz, delay, reg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
- /*
- * The hardware seems to have a race condition when changing modes. The
- * current assumption is that the selection of the channel arrives
- * earlier in the hardware than the mode bits when they are written at
- * the same time.
- * So set master mode for all channels as we do not support slave mode.
- */
- ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
+ /* set Master or Slave mode */
+ if (spi_imx->slave_mode)
+ ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
+ else
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/*
* Enable SPI_RDY handling (falling edge/level triggered).
@@ -468,9 +529,22 @@ static int mx51_ecspi_config(struct spi_device *spi)
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
- ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->slave_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ /*
+ * eCSPI burst completion by Chip Select signal in Slave mode
+ * is not functional on the i.MX53 SoC, so configure the SPI burst
+ * to complete when BURST_LENGTH + 1 bits have been received
+ */
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ else
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
if (spi->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
@@ -805,6 +879,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX1_CSPI,
};
@@ -817,6 +892,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX21_CSPI,
};
@@ -830,6 +906,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX27_CSPI,
};
@@ -842,6 +919,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX31_CSPI,
};
@@ -855,6 +933,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = true,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX35_CSPI,
};
@@ -867,6 +946,8 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
@@ -878,6 +959,8 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX53_ECSPI,
};
@@ -945,14 +1028,16 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
spi_imx->txfifo++;
}
- spi_imx->devtype_data->trigger(spi_imx);
+ if (!spi_imx->slave_mode)
+ spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
- while (spi_imx->devtype_data->rx_available(spi_imx)) {
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
@@ -1034,7 +1119,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->speed_hz = t->speed_hz;
/* Initialize the functions for transfer */
- if (spi_imx->devtype_data->dynamic_burst) {
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) {
u32 mask;
spi_imx->dynamic_burst = 0;
@@ -1078,6 +1163,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return ret;
}
+ if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
+ spi_imx->rx = mx53_ecspi_rx_slave;
+ spi_imx->tx = mx53_ecspi_tx_slave;
+ spi_imx->slave_burst = t->len;
+ }
+
spi_imx->devtype_data->config(spi);
return 0;
@@ -1262,11 +1353,61 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
return transfer->len;
}
+static int spi_imx_pio_transfer_slave(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int ret = transfer->len;
+
+ if (is_imx53_ecspi(spi_imx) &&
+ transfer->len > MX53_MAX_TRANSFER_BYTES) {
+ dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
+ MX53_MAX_TRANSFER_BYTES);
+ return -EMSGSIZE;
+ }
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+ spi_imx->slave_aborted = false;
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
+
+ if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
+ spi_imx->slave_aborted) {
+ dev_dbg(&spi->dev, "interrupted\n");
+ ret = -EINTR;
+ }
+
+ /* eCSPI has a HW issue when working in Slave mode: after 64 words
+ * are written to the TXFIFO, ECSPI_TXDATA keeps shifting out the
+ * last word even once the TXFIFO becomes empty, so we have to
+ * disable eCSPI in Slave mode after the transfer completes
+ */
+ if (spi_imx->devtype_data->disable)
+ spi_imx->devtype_data->disable(spi_imx);
+
+ return ret;
+}
+
static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ /* flush rxfifo before transfer */
+ while (spi_imx->devtype_data->rx_available(spi_imx))
+ spi_imx->rx(spi_imx);
+
+ if (spi_imx->slave_mode)
+ return spi_imx_pio_transfer_slave(spi, transfer);
+
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
else
@@ -1323,6 +1464,16 @@ spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
return 0;
}
+static int spi_imx_slave_abort(struct spi_master *master)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ spi_imx->slave_aborted = true;
+ complete(&spi_imx->xfer_done);
+
+ return 0;
+}
+
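spi_imx_slave_abort() hooks the controller into the SPI core's slave-abort mechanism: the slave transfer above parks in wait_for_completion_interruptible(), and an abort simply sets slave_aborted and completes the same completion so the waiter bails out with -EINTR. A hedged sketch of that handshake using only the kernel completion primitives; the demo_* names are hypothetical:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct demo_slave {
            struct completion xfer_done;
            bool aborted;
    };

    /* transfer side: wait until the master clocks the data or we are aborted */
    static int demo_wait_for_master(struct demo_slave *s)
    {
            reinit_completion(&s->xfer_done);
            s->aborted = false;

            if (wait_for_completion_interruptible(&s->xfer_done) || s->aborted)
                    return -EINTR;
            return 0;
    }

    /* abort side: what the core's ->slave_abort() hook ends up doing here */
    static int demo_abort(struct demo_slave *s)
    {
            s->aborted = true;
            complete(&s->xfer_done);
            return 0;
    }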
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1334,13 +1485,23 @@ static int spi_imx_probe(struct platform_device *pdev)
struct spi_imx_data *spi_imx;
struct resource *res;
int i, ret, irq, spi_drctl;
+ const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ bool slave_mode;
if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ slave_mode = devtype_data->has_slavemode &&
+ of_property_read_bool(np, "spi-slave");
+ if (slave_mode)
+ master = spi_alloc_slave(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ else
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data));
if (!master)
return -ENOMEM;
@@ -1358,20 +1519,29 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
spi_imx->dev = &pdev->dev;
+ spi_imx->slave_mode = slave_mode;
- spi_imx->devtype_data = of_id ? of_id->data :
- (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ spi_imx->devtype_data = devtype_data;
+ /* Get the number of chip selects from either platform data or OF */
if (mxc_platform_info) {
master->num_chipselect = mxc_platform_info->num_chipselect;
- master->cs_gpios = devm_kzalloc(&master->dev,
- sizeof(int) * master->num_chipselect, GFP_KERNEL);
- if (!master->cs_gpios)
- return -ENOMEM;
+ if (mxc_platform_info->chipselect) {
+ master->cs_gpios = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ master->cs_gpios[i] = mxc_platform_info->chipselect[i];
+ }
+ } else {
+ u32 num_cs;
- for (i = 0; i < master->num_chipselect; i++)
- master->cs_gpios[i] = mxc_platform_info->chipselect[i];
- }
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+ /* If the property is not present, the default value of 1 is used */
+ }
spi_imx->bitbang.chipselect = spi_imx_chipselect;
spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
@@ -1380,6 +1550,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS;
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
@@ -1451,37 +1622,38 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+
+ /* Request GPIO CS lines, if any */
+ if (!spi_imx->slave_mode && master->cs_gpios) {
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (!gpio_is_valid(master->cs_gpios[i]))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev,
+ master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
+ master->cs_gpios[i]);
+ goto out_spi_bitbang;
+ }
+ }
+ }
+
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
goto out_clk_put;
}
- if (!master->cs_gpios) {
- dev_err(&pdev->dev, "No CS GPIOs available\n");
- ret = -EINVAL;
- goto out_clk_put;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
- master->cs_gpios[i]);
- goto out_clk_put;
- }
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
clk_disable(spi_imx->clk_per);
return ret;
+out_spi_bitbang:
+ spi_bitbang_stop(&spi_imx->bitbang);
out_clk_put:
clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 5b0e9a3e83f6..3d216b950b41 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -44,6 +44,7 @@
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
@@ -442,6 +443,85 @@ static int mxs_spi_transfer_one(struct spi_master *master,
return status;
}
+static int mxs_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ clk_disable_unprepare(ssp->clk);
+
+ ret = pinctrl_pm_select_idle_state(dev);
+ if (ret) {
+ int ret2 = clk_prepare_enable(ssp->clk);
+
+ if (ret2)
+ dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
+ ret, ret2);
+ }
+
+ return ret;
+}
+
+static int mxs_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ pinctrl_pm_select_idle_state(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mxs_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ return mxs_spi_runtime_suspend(dev);
+ else
+ return 0;
+}
+
+static int __maybe_unused mxs_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_suspended(dev))
+ ret = mxs_spi_runtime_resume(dev);
+ else
+ ret = 0;
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret < 0 && !pm_runtime_suspended(dev))
+ mxs_spi_runtime_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_spi_pm = {
+ SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
+ mxs_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
+};
+
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -493,12 +573,15 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
master->transfer_one_message = mxs_spi_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->auto_runtime_pm = true;
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
@@ -521,28 +604,41 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_master_free;
}
- ret = clk_prepare_enable(ssp->clk);
- if (ret)
- goto out_dma_release;
+ pm_runtime_enable(ssp->dev);
+ if (!pm_runtime_enabled(ssp->dev)) {
+ ret = mxs_spi_runtime_resume(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime resume failed\n");
+ goto out_dma_release;
+ }
+ }
+
+ ret = pm_runtime_get_sync(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime_get_sync failed\n");
+ goto out_pm_runtime_disable;
+ }
clk_set_rate(ssp->clk, clk_freq);
ret = stmp_reset_block(ssp->base);
if (ret)
- goto out_disable_clk;
-
- platform_set_drvdata(pdev, master);
+ goto out_pm_runtime_put;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
- goto out_disable_clk;
+ goto out_pm_runtime_put;
}
+ pm_runtime_put(ssp->dev);
+
return 0;
-out_disable_clk:
- clk_disable_unprepare(ssp->clk);
+out_pm_runtime_put:
+ pm_runtime_put(ssp->dev);
+out_pm_runtime_disable:
+ pm_runtime_disable(ssp->dev);
out_dma_release:
dma_release_channel(ssp->dmach);
out_master_free:
@@ -560,7 +656,10 @@ static int mxs_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- clk_disable_unprepare(ssp->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mxs_spi_runtime_suspend(&pdev->dev);
+
dma_release_channel(ssp->dmach);
return 0;
@@ -572,6 +671,7 @@ static struct platform_driver mxs_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_spi_dt_ids,
+ .pm = &mxs_spi_pm,
},
};
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 4b6dd73b80da..8974bb340b3a 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -671,7 +671,6 @@ static int orion_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"%pOF has no valid 'reg' property (%d)\n",
np, status);
- status = 0;
continue;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2a10b3f94ff7..2ce875764ca6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1221,7 +1221,6 @@ static int rspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct rspi_data *rspi;
int ret;
- const struct of_device_id *of_id;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
@@ -1229,9 +1228,8 @@ static int rspi_probe(struct platform_device *pdev)
if (master == NULL)
return -ENOMEM;
- of_id = of_match_device(rspi_of_match, &pdev->dev);
- if (of_id) {
- ops = of_id->data;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (ops) {
ret = rspi_parse_dt(&pdev->dev, master);
if (ret)
goto error1;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b392cca8fa4f..de7df20f8712 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -752,7 +752,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_info *sci;
int err;
sdd = spi_master_get_devdata(spi->master);
@@ -788,8 +787,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- sci = sdd->cntrlr_info;
-
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0eb1e9583485..fcd261f98b9f 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 1)
+ if (l & 3)
break;
copy32 = copy_wswap32;
} else {
@@ -1021,6 +1021,8 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
@@ -1188,12 +1190,10 @@ free_tx_chan:
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
struct spi_master *master = p->master;
- struct device *dev;
if (!master->dma_tx)
return;
- dev = &p->pdev->dev;
dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
@@ -1209,15 +1209,13 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
struct resource *r;
struct spi_master *master;
const struct sh_msiof_chipdata *chipdata;
- const struct of_device_id *of_id;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
int i;
int ret;
- of_id = of_match_device(sh_msiof_match, &pdev->dev);
- if (of_id) {
- chipdata = of_id->data;
+ chipdata = of_device_get_match_data(&pdev->dev);
+ if (chipdata) {
info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
new file mode 100644
index 000000000000..5993bdbf79e4
--- /dev/null
+++ b/drivers/spi/spi-sprd-adi.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+/* Registers definitions for ADI controller */
+#define REG_ADI_CTRL0 0x4
+#define REG_ADI_CHN_PRIL 0x8
+#define REG_ADI_CHN_PRIH 0xc
+#define REG_ADI_INT_EN 0x10
+#define REG_ADI_INT_RAW 0x14
+#define REG_ADI_INT_MASK 0x18
+#define REG_ADI_INT_CLR 0x1c
+#define REG_ADI_GSSI_CFG0 0x20
+#define REG_ADI_GSSI_CFG1 0x24
+#define REG_ADI_RD_CMD 0x28
+#define REG_ADI_RD_DATA 0x2c
+#define REG_ADI_ARM_FIFO_STS 0x30
+#define REG_ADI_STS 0x34
+#define REG_ADI_EVT_FIFO_STS 0x38
+#define REG_ADI_ARM_CMD_STS 0x3c
+#define REG_ADI_CHN_EN 0x40
+#define REG_ADI_CHN_ADDR(id) (0x44 + (id - 2) * 4)
+#define REG_ADI_CHN_EN1 0x20c
+
+/* Bits definitions for register REG_ADI_GSSI_CFG0 */
+#define BIT_CLK_ALL_ON BIT(30)
+
+/* Bits definitions for register REG_ADI_RD_DATA */
+#define BIT_RD_CMD_BUSY BIT(31)
+#define RD_ADDR_SHIFT 16
+#define RD_VALUE_MASK GENMASK(15, 0)
+#define RD_ADDR_MASK GENMASK(30, 16)
+
+/* Bits definitions for register REG_ADI_ARM_FIFO_STS */
+#define BIT_FIFO_FULL BIT(11)
+#define BIT_FIFO_EMPTY BIT(10)
+
+/*
+ * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
+ * The slave devices' address offset is always 0x8000 and their size is 4K.
+ */
+#define ADI_SLAVE_ADDR_SIZE SZ_4K
+#define ADI_SLAVE_OFFSET 0x8000
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define ADI_HWSPINLOCK_TIMEOUT 5000
+/*
+ * ADI controller has 50 channels including 2 software channels
+ * and 48 hardware channels.
+ */
+#define ADI_HW_CHNS 50
+
+#define ADI_FIFO_DRAIN_TIMEOUT 1000
+#define ADI_READ_TIMEOUT 2000
+#define REG_ADDR_LOW_MASK GENMASK(11, 0)
+
+struct sprd_adi {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ struct hwspinlock *hwlock;
+ unsigned long slave_vbase;
+ unsigned long slave_pbase;
+};
+
+static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+{
+ if (paddr < sadi->slave_pbase || paddr >
+ (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+ dev_err(sadi->dev,
+ "slave physical address is incorrect, addr = 0x%x\n",
+ paddr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
+{
+ return (paddr - sadi->slave_pbase + sadi->slave_vbase);
+}
+
+static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ u32 sts;
+
+ do {
+ sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS);
+ if (sts & BIT_FIFO_EMPTY)
+ break;
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "drain write fifo timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
+{
+ return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
+}
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+{
+ int read_timeout = ADI_READ_TIMEOUT;
+ u32 val, rd_addr;
+
+ /*
+ * Write the physical register address to read into the RD_CMD register;
+ * the ADI controller will then start the transfer automatically.
+ */
+ writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+
+ /*
+ * Wait for the read operation to complete: BIT_RD_CMD_BUSY is set as
+ * soon as the read command is written to the register and is cleared
+ * once the read operation has completed.
+ */
+ do {
+ val = readl_relaxed(sadi->base + REG_ADI_RD_DATA);
+ if (!(val & BIT_RD_CMD_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--read_timeout);
+
+ if (read_timeout == 0) {
+ dev_err(sadi->dev, "ADI read timeout\n");
+ return -EBUSY;
+ }
+
+ /*
+ * The returned value contains both the data and the register address
+ * that was read: bits 0-15 hold the data and bits 16-30 hold the
+ * register address, which we check to validate the data.
+ */
+ rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+ if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
+ dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
+ reg_paddr, val);
+ return -EIO;
+ }
+
+ *read_val = val & RD_VALUE_MASK;
+ return 0;
+}
+
+static int sprd_adi_write(struct sprd_adi *sadi, unsigned long reg, u32 val)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ int ret;
+
+ ret = sprd_adi_drain_fifo(sadi);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait until the write FIFO is not full before writing data to the
+ * PMIC registers.
+ */
+ do {
+ if (!sprd_adi_fifo_is_full(sadi)) {
+ writel_relaxed(val, (void __iomem *)reg);
+ break;
+ }
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "write fifo is full\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi_dev,
+ struct spi_transfer *t)
+{
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+ unsigned long flags, virt_reg;
+ u32 phy_reg, val;
+ int ret;
+
+ if (t->rx_buf) {
+ phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
+
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_read(sadi, phy_reg, &val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+
+ *(u32 *)t->rx_buf = val;
+ } else if (t->tx_buf) {
+ u32 *p = (u32 *)t->tx_buf;
+
+ /*
+ * Get the physical register address to write and convert it to a
+ * virtual address, since the write is performed through the virtual
+ * register address.
+ */
+ phy_reg = *p++ + sadi->slave_pbase;
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ virt_reg = sprd_adi_to_vaddr(sadi, phy_reg);
+ val = *p;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_write(sadi, virt_reg, val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+ } else {
+ dev_err(sadi->dev, "no buffer for transfer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sprd_adi_hw_init(struct sprd_adi *sadi)
+{
+ struct device_node *np = sadi->dev->of_node;
+ int i, size, chn_cnt;
+ const __be32 *list;
+ u32 tmp;
+
+ /* Address bits select default 12 bits */
+ writel_relaxed(0, sadi->base + REG_ADI_CTRL0);
+
+ /* Set all channels as default priority */
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
+
+ /* Set clock auto gate mode */
+ tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0);
+ tmp &= ~BIT_CLK_ALL_ON;
+ writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0);
+
+ /* Apply the hardware channel configuration */
+ list = of_get_property(np, "sprd,hw-channels", &size);
+ if (!list || !size) {
+ dev_info(sadi->dev, "no hw channels setting in node\n");
+ return;
+ }
+
+ chn_cnt = size / 8;
+ for (i = 0; i < chn_cnt; i++) {
+ u32 value;
+ u32 chn_id = be32_to_cpu(*list++);
+ u32 chn_config = be32_to_cpu(*list++);
+
+ /* Channel 0 and 1 are software channels */
+ if (chn_id < 2)
+ continue;
+
+ writel_relaxed(chn_config, sadi->base +
+ REG_ADI_CHN_ADDR(chn_id));
+
+ if (chn_id < 32) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN);
+ value |= BIT(chn_id);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN);
+ } else if (chn_id < ADI_HW_CHNS) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1);
+ value |= BIT(chn_id - 32);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1);
+ }
+ }
+}
+
+static int sprd_adi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_controller *ctlr;
+ struct sprd_adi *sadi;
+ struct resource *res;
+ u32 num_chipselect;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "can not find the adi bus node\n");
+ return -ENODEV;
+ }
+
+ pdev->id = of_alias_get_id(np, "spi");
+ num_chipselect = of_get_child_count(np);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+ sadi = spi_controller_get_devdata(ctlr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sadi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sadi->base)) {
+ ret = PTR_ERR(sadi->base);
+ goto put_ctlr;
+ }
+
+ sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
+ sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+ sadi->ctlr = ctlr;
+ sadi->dev = &pdev->dev;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not get the hardware spinlock\n");
+ goto put_ctlr;
+ }
+
+ sadi->hwlock = hwspin_lock_request_specific(ret);
+ if (!sadi->hwlock) {
+ ret = -ENXIO;
+ goto put_ctlr;
+ }
+
+ sprd_adi_hw_init(sadi);
+
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = num_chipselect;
+ ctlr->flags = SPI_MASTER_HALF_DUPLEX;
+ ctlr->bits_per_word_mask = 0;
+ ctlr->transfer_one = sprd_adi_transfer_one;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI controller\n");
+ goto free_hwlock;
+ }
+
+ return 0;
+
+free_hwlock:
+ hwspin_lock_free(sadi->hwlock);
+put_ctlr:
+ spi_controller_put(ctlr);
+ return ret;
+}
+
+static int sprd_adi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+
+ hwspin_lock_free(sadi->hwlock);
+ return 0;
+}
+
+static const struct of_device_id sprd_adi_of_match[] = {
+ {
+ .compatible = "sprd,sc9860-adi",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_adi_of_match);
+
+static struct platform_driver sprd_adi_driver = {
+ .driver = {
+ .name = "sprd-adi",
+ .of_match_table = sprd_adi_of_match,
+ },
+ .probe = sprd_adi_probe,
+ .remove = sprd_adi_remove,
+};
+module_platform_driver(sprd_adi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver");
+MODULE_AUTHOR("Baolin Wang <Baolin.Wang@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
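For reference, sprd_adi_transfer_one() above defines a simple word-based protocol: a read transfer passes the register offset in rx_buf and gets the value written back into the same buffer, while a write transfer passes { offset, value } in tx_buf. A hypothetical client sketch (example_adi_read/example_adi_write are illustrations, not part of the patch):

#include <linux/spi/spi.h>

static int example_adi_read(struct spi_device *spi, u32 reg, u32 *val)
{
	u32 buf = reg;			/* register offset inside the 4K slave window */
	struct spi_transfer t = {
		.rx_buf = &buf,
		.len = sizeof(buf),
	};
	int ret;

	ret = spi_sync_transfer(spi, &t, 1);
	if (!ret)
		*val = buf;		/* transfer_one() wrote the value back here */
	return ret;
}

static int example_adi_write(struct spi_device *spi, u32 reg, u32 val)
{
	u32 buf[2] = { reg, val };	/* { offset, value }, as parsed by transfer_one() */
	struct spi_transfer t = {
		.tx_buf = buf,
		.len = sizeof(buf),
	};

	return spi_sync_transfer(spi, &t, 1);
}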
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 44550182a4a3..a76acedd7e2f 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -50,7 +50,7 @@
#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
#define SPI_IDLE_SDA_MASK (3 << 18)
-#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
/* n from 0 to 3 */
@@ -705,9 +705,9 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
- command1 |= SPI_CS_SS_VAL;
+ command1 |= SPI_CS_SW_VAL;
else
- command1 &= ~SPI_CS_SS_VAL;
+ command1 &= ~SPI_CS_SW_VAL;
tegra_spi_writel(tspi, 0, SPI_COMMAND2);
} else {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e8b5a5e21b2e..b33a727a0158 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2200,7 +2200,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
* Context: can sleep
*
* Register a SPI device as with spi_register_controller() which will
- * automatically be unregister
+ * automatically be unregistered and freed.
*
* Return: zero on success, else a negative error code.
*/
@@ -2241,15 +2241,18 @@ static int __unregister(struct device *dev, void *null)
* only ones directly touching chip registers.
*
* This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the controller.
*/
void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
+ int id = ctlr->bus_num;
int dummy;
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, ctlr->bus_num);
+ found = idr_find(&spi_master_idr, id);
mutex_unlock(&board_lock);
if (found != ctlr) {
dev_dbg(&ctlr->dev,
@@ -2269,7 +2272,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
diff --git a/fs/file_table.c b/fs/file_table.c
index 61517f57f8ef..49e1f2f1a4cb 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -201,11 +201,11 @@ static void __fput(struct file *file)
eventpoll_release(file);
locks_remove_file(file);
+ ima_file_free(file);
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
- ima_file_free(file);
if (file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index d2013064dc69..dc9726fdac8f 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -26,6 +26,20 @@
#if BITS_PER_LONG == 64
+/**
+ * do_div - 64bit/32bit division: update the dividend in place, return the remainder
+ * @n: pointer to uint64_t dividend (will be updated)
+ * @base: uint32_t divisor
+ *
+ * Summary:
+ * ``uint32_t remainder = *n % base;``
+ * ``*n = *n / base;``
+ *
+ * Return: (uint32_t)remainder
+ *
+ * NOTE: macro parameter @n is evaluated multiple times,
+ * beware of side effects!
+ */
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
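A short usage sketch for the do_div() kernel-doc added above; the values are arbitrary examples and only illustrate that the macro updates the dividend in place and returns the remainder:

#include <asm/div64.h>

static u32 example_do_div(void)
{
	u64 n = 1000000007ULL;
	u32 rem;

	rem = do_div(n, 1000);	/* n is now 1000000, rem is 7 */
	return rem;		/* remember: @n is evaluated more than once */
}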
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 19748a5b0e77..3489253e38fc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -22,65 +22,74 @@
* See lib/bitmap.c for more details.
*/
-/*
+/**
+ * DOC: bitmap overview
+ *
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
- * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_weight(src, nbits) Hamming Weight: number set bits
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
- * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
- * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
- * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
- * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
- * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
- * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
- * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
- * bitmap_release_region(bitmap, pos, order) Free specified bit region
- * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
- * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ * ::
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
+ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
+ * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ *
*/
-/*
- * Also the following operations in asm/bitops.h apply to bitmaps.
+/**
+ * DOC: bitmap bitops
+ *
+ * Also the following operations in asm/bitops.h apply to bitmaps.::
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
-/*
+/**
+ * DOC: declare bitmap
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
@@ -361,8 +370,9 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
-/*
+/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
+ * @n: u64 value
*
* Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
* integers in 32-bit environment, and 64-bit integers in 64-bit one.
@@ -393,14 +403,14 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
((unsigned long) ((u64)(n) >> 32))
#endif
-/*
+/**
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
*
- * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
+ * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
* to read u64 mask, we will get the wrong word.
- * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
+ * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
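An illustrative (hypothetical) use of a few of the bitmap operations listed in the DOC blocks above, including the endian-safe bitmap_from_u64() helper whose comment was fixed here:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void example_bitmap(void)
{
	DECLARE_BITMAP(mask, 64);	/* enough unsigned longs for 64 bits */

	bitmap_zero(mask, 64);
	bitmap_set(mask, 4, 8);			/* set bits 4..11 */
	if (bitmap_weight(mask, 64) == 8)	/* Hamming weight is now 8 */
		pr_info("bits 4-11 are set\n");

	bitmap_from_u64(mask, 0xff00ff00ff00ff00ULL);	/* endian-safe u64 load */
}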
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 885266aae2d7..e1f75a3b4af5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2793,6 +2793,7 @@ extern int do_pipe_flags(int *, int);
id(KEXEC_IMAGE, kexec-image) \
id(KEXEC_INITRAMFS, kexec-initramfs) \
id(POLICY, security-policy) \
+ id(X509_CERTIFICATE, x509-certificate) \
id(MAX_ID, )
#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h
deleted file mode 100644
index 096659169215..000000000000
--- a/include/linux/gpio-fan.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * include/linux/gpio-fan.h
- *
- * Platform data structure for GPIO fan driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_GPIO_FAN_H
-#define __LINUX_GPIO_FAN_H
-
-struct gpio_fan_alarm {
- unsigned gpio;
- unsigned active_low;
-};
-
-struct gpio_fan_speed {
- int rpm;
- int ctrl_val;
-};
-
-struct gpio_fan_platform_data {
- int num_ctrl;
- unsigned *ctrl; /* fan control GPIOs. */
- struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */
- /*
- * Speed conversion array: rpm from/to GPIO bit field.
- * This array _must_ be sorted in ascending rpm order.
- */
- int num_speed;
- struct gpio_fan_speed *speed;
-};
-
-#endif /* __LINUX_GPIO_FAN_H */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 11dd93e42580..708f337d780b 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -14,6 +14,12 @@
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#ifndef CONFIG_64BIT
+# define KALLSYM_FMT "%08lx"
+#else
+# define KALLSYM_FMT "%016lx"
+#endif
+
struct module;
#ifdef CONFIG_KALLSYMS
@@ -46,6 +52,9 @@ extern void __print_symbol(const char *fmt, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+/* How and when do we show kallsyms values? */
+extern int kallsyms_show_value(void);
+
#else /* !CONFIG_KALLSYMS */
static inline unsigned long kallsyms_lookup_name(const char *name)
@@ -104,6 +113,11 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
return -ERANGE;
}
+static inline int kallsyms_show_value(void)
+{
+ return false;
+}
+
/* Stupid that this does nothing, but I didn't create this mess. */
#define __print_symbol(fmt, addr)
#endif /*CONFIG_KALLSYMS*/
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c373295f359f..41a1ae010993 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -37,19 +37,23 @@ int __ilog2_u64(u64 n)
}
#endif
-/*
- * Determine whether some value is a power of two, where zero is
+/**
+ * is_power_of_2() - check if a value is a power of two
+ * @n: the value to check
+ *
+ * Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
+ * Return: true if @n is a power of 2, otherwise false.
*/
-
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
-/*
- * round up to nearest power of two
+/**
+ * __roundup_pow_of_two() - round up to nearest power of two
+ * @n: value to round up
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
@@ -57,8 +61,9 @@ unsigned long __roundup_pow_of_two(unsigned long n)
return 1UL << fls_long(n - 1);
}
-/*
- * round down to nearest power of two
+/**
+ * __rounddown_pow_of_two() - round down to nearest power of two
+ * @n: value to round down
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
@@ -67,12 +72,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
- * @n - parameter
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
*
* constant-capable log of base 2 calculation
* - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
+ * the massive ternary operator construction
*
* selects the appropriately-sized optimised version depending on sizeof(n)
*/
@@ -150,7 +155,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* roundup_pow_of_two - round the given value up to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
@@ -167,7 +172,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
@@ -180,6 +185,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
__rounddown_pow_of_two(n) \
)
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
@@ -193,13 +204,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ob2(5) = 3
* ... and so on.
*/
-
-static inline __attribute_const__
-int __order_base_2(unsigned long n)
-{
- return n > 1 ? ilog2(n - 1) + 1 : 0;
-}
-
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
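Worked examples for the log2 helpers whose kernel-doc was completed above; the values are illustrative only:

#include <linux/log2.h>
#include <linux/printk.h>

static void example_log2(void)
{
	unsigned long n = 100;

	if (is_power_of_2(64))		/* true: 64 == 2^6; zero is *not* a power of two */
		pr_info("roundup=%lu rounddown=%lu order=%d\n",
			roundup_pow_of_two(n),		/* 128 */
			rounddown_pow_of_two(n),	/* 64 */
			order_base_2(5));		/* 3, matching the ob2(5) example */
}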
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 082de345b73c..837f2f2d1d34 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -12,6 +12,11 @@
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ * @remainder: pointer to unsigned 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*
* This is commonly provided by 32bit archs to provide an optimized 64bit
* divide.
@@ -24,6 +29,11 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ * @remainder: pointer to signed 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -33,6 +43,11 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ * @remainder: pointer to unsigned 64bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
@@ -42,6 +57,10 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
@@ -50,6 +69,10 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
/**
* div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
@@ -89,6 +112,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
@@ -104,6 +129,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
/**
* div_s64 - signed 64bit divide with 32bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
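A small usage sketch for the division helpers whose parameters are documented above (values are examples only):

#include <linux/math64.h>

static u64 example_round_to_seconds(void)
{
	u64 ns = 3000000007ULL;		/* example: ~3 seconds in nanoseconds */
	u32 rem;
	u64 sec;

	sec = div_u64_rem(ns, 1000000000, &rem);	/* sec == 3, rem == 7 */
	if (rem >= 500000000)
		sec++;					/* round to the nearest second */
	return sec;
}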
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index e9c908c4fba8..78dc85365c4f 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -131,6 +131,9 @@ enum axp20x_variants {
#define AXP803_DCDC6_V_OUT 0x25
#define AXP803_DCDC_FREQ_CTRL 0x3b
+/* Other DCDC regulator control registers are the same as AXP803 */
+#define AXP813_DCDC7_V_OUT 0x26
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 116816fb9110..7815d8db7eca 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -334,6 +334,7 @@
#define DCM_DRP_RD_DATA_H 0xFC29
#define SD_VPCLK0_CTL 0xFC2A
#define SD_VPCLK1_CTL 0xFC2B
+#define PHASE_SELECT_MASK 0x1F
#define SD_DCMPS0_CTL 0xFC2C
#define SD_DCMPS1_CTL 0xFC2D
#define SD_VPTX_CTL SD_VPCLK0_CTL
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index bccd2d68b1e3..f069c518c0ed 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -246,24 +246,6 @@ enum tps65218_irqs {
};
/**
- * struct tps_info - packages regulator constraints
- * @id: Id of the regulator
- * @name: Voltage regulator name
- * @min_uV: minimum micro volts
- * @max_uV: minimum micro volts
- * @strobe: sequencing strobe value for the regulator
- *
- * This data is used to check the regualtor voltage limits while setting.
- */
-struct tps_info {
- int id;
- const char *name;
- int min_uV;
- int max_uV;
- int strobe;
-};
-
-/**
* struct tps65218 - tps65218 sub-driver chip access routines
*
* Device data may be used to access the TPS65218 chip
@@ -280,7 +262,6 @@ struct tps65218 {
u32 irq_mask;
struct regmap_irq_chip_data *irq_data;
struct regulator_desc desc[TPS65218_NUM_REGULATOR];
- struct tps_info *info[TPS65218_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9a43763a68ad..e7743eca1021 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -255,6 +255,10 @@ struct mmc_supply {
struct regulator *vqmmc; /* Optional Vccq supply */
};
+struct mmc_ctx {
+ struct task_struct *task;
+};
+
struct mmc_host {
struct device *parent;
struct device class_dev;
@@ -350,6 +354,8 @@ struct mmc_host {
#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
+ int fixed_drv_type; /* fixed driver type for non-removable media */
+
mmc_pm_flag_t pm_caps; /* supported pm features */
/* host specific block data */
@@ -388,8 +394,9 @@ struct mmc_host {
struct mmc_card *card; /* device attached to this host */
wait_queue_head_t wq;
- struct task_struct *claimer; /* task that has host claimed */
+ struct mmc_ctx *claimer; /* context that has host claimed */
int claim_cnt; /* "claim" nesting count */
+ struct mmc_ctx default_ctx; /* default context */
struct delayed_work detect;
int detect_change; /* card detect flag */
@@ -469,6 +476,8 @@ void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
+
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 36f986d4a59a..1d42872d22f3 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -15,7 +15,4 @@ struct sdhci_pci_data {
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
-
-extern int sdhci_pci_spt_drive_strength;
-
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index fe5aa3736707..c69b49abe877 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -639,6 +639,8 @@ static inline bool is_livepatch_module(struct module *mod)
}
#endif /* CONFIG_LIVEPATCH */
+bool is_module_sig_enforced(void);
+
#else /* !CONFIG_MODULES... */
static inline struct module *__module_address(unsigned long addr)
@@ -753,6 +755,11 @@ static inline bool module_requested_async_probing(struct module *module)
return false;
}
+static inline bool is_module_sig_enforced(void)
+{
+ return false;
+}
+
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h
deleted file mode 100644
index 12289c1e9413..000000000000
--- a/include/linux/platform_data/sht15.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * sht15.h - support for the SHT15 Temperature and Humidity Sensor
- *
- * Copyright (c) 2009 Jonathan Cameron
- *
- * Copyright (c) 2007 Wouter Horre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * For further information, see the Documentation/hwmon/sht15 file.
- */
-
-#ifndef _PDATA_SHT15_H
-#define _PDATA_SHT15_H
-
-/**
- * struct sht15_platform_data - sht15 connectivity info
- * @gpio_data: no. of gpio to which bidirectional data line is
- * connected.
- * @gpio_sck: no. of gpio to which the data clock is connected.
- * @supply_mv: supply voltage in mv. Overridden by regulator if
- * available.
- * @checksum: flag to indicate the checksum should be validated.
- * @no_otp_reload: flag to indicate no reload from OTP.
- * @low_resolution: flag to indicate the temp/humidity resolution to use.
- */
-struct sht15_platform_data {
- int gpio_data;
- int gpio_sck;
- int supply_mv;
- bool checksum;
- bool no_otp_reload;
- bool low_resolution;
-};
-
-#endif /* _PDATA_SHT15_H */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 335926039adc..905bba92f015 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -189,7 +189,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int kptr_restrict;
extern int
devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
@@ -280,6 +279,8 @@ static inline void printk_safe_flush_on_panic(void)
}
#endif
+extern int kptr_restrict;
+
extern asmlinkage void dump_stack(void) __cold;
#ifndef pr_fmt
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 978abfbac617..15eddc1353ba 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -120,21 +120,65 @@ struct reg_sequence {
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
+ might_sleep_if(__sleep_us); \
+ for (;;) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
+ break; \
+ if (cond) \
+ break; \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ } \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
+/**
+ * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
+ *
+ * @field: Regmap field to read from
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 means
+ * tight-loop). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * error return value in case of an error read. In the two former cases,
+ * the last value read from @field is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \
int pollret; \
- might_sleep_if(sleep_us); \
+ might_sleep_if(__sleep_us); \
for (;;) { \
- pollret = regmap_read((map), (addr), &(val)); \
+ pollret = regmap_field_read((field), &(val)); \
if (pollret) \
break; \
if (cond) \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- pollret = regmap_read((map), (addr), &(val)); \
+ if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ pollret = regmap_field_read((field), &(val)); \
break; \
} \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
} \
pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
@@ -273,6 +317,9 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @hwlock_id: Specify the hardware spinlock id.
+ * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
+ * HWLOCK_IRQ or 0.
*/
struct regmap_config {
const char *name;
@@ -317,6 +364,9 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+
+ unsigned int hwlock_id;
+ unsigned int hwlock_mode;
};
/**
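A hypothetical sketch of the new regmap_field_read_poll_timeout() macro and the hwlock_id/hwlock_mode config fields documented above; the field, register widths and lock id are made-up examples:

#include <linux/hwspinlock.h>
#include <linux/regmap.h>

static int example_wait_ready(struct regmap_field *ready_field)
{
	unsigned int val;

	/* Poll until the field reads non-zero; sleep ~100us, give up after 10ms. */
	return regmap_field_read_poll_timeout(ready_field, val, val != 0,
					      100, 10000);
}

static const struct regmap_config example_config = {
	.reg_bits    = 32,
	.val_bits    = 32,
	.hwlock_id   = 1,		/* hardware spinlock protecting the bus */
	.hwlock_mode = HWLOCK_IRQSTATE,	/* take the lock with IRQ state saved */
};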
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 80cb40b7c88d..f2fd2d3bf58f 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,6 +1,6 @@
/*
* da9211.h - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -25,8 +25,11 @@ enum da9211_chip_id {
DA9211,
DA9212,
DA9213,
+ DA9223,
DA9214,
+ DA9224,
DA9215,
+ DA9225,
};
struct da9211_pdata {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 72299ef00061..d448a4804aea 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3770,6 +3770,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
#endif
}
+static inline void ipvs_reset(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IP_VS)
+ skb->ipvs_property = 0;
+#endif
+}
+
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
diff --git a/include/linux/spi/spi-fsl-dspi.h b/include/linux/spi/spi-fsl-dspi.h
new file mode 100644
index 000000000000..74c9bae20bf2
--- /dev/null
+++ b/include/linux/spi/spi-fsl-dspi.h
@@ -0,0 +1,31 @@
+/*
+ * Freescale DSPI controller driver
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SPI_FSL_DSPI_HEADER_H
+#define SPI_FSL_DSPI_HEADER_H
+
+/**
+ * struct fsl_dspi_platform_data - platform data for the Freescale DSPI driver
+ * @bus_num: board specific identifier for this DSPI driver.
+ * @cs_num: number of chip selects supported by this DSPI driver.
+ * @sck_cs_delay: delay between the last SCK edge and chip select deassertion.
+ * @cs_sck_delay: delay between chip select assertion and the first SCK edge.
+ */
+struct fsl_dspi_platform_data {
+ u32 cs_num;
+ u32 bus_num;
+ u32 sck_cs_delay;
+ u32 cs_sck_delay;
+};
+
+#endif /* SPI_FSL_DSPI_HEADER_H */
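A hypothetical board-file sketch showing how the new platform data might be attached to a DSPI platform device; the device name, id and delay values here are assumptions for illustration:

#include <linux/platform_device.h>
#include <linux/spi/spi-fsl-dspi.h>

static struct fsl_dspi_platform_data example_dspi_pdata = {
	.bus_num      = 0,
	.cs_num       = 4,
	.sck_cs_delay = 100,	/* example delay values */
	.cs_sck_delay = 100,
};

static struct platform_device example_dspi_dev = {
	.name = "fsl-dspi",	/* assumed to match the DSPI platform driver name */
	.id   = 0,
	.dev  = {
		.platform_data = &example_dspi_pdata,
	},
};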
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 74f91eefeccf..b769ecfcc3bd 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -213,6 +213,11 @@ static inline struct ctl_table_header *register_sysctl_paths(
return NULL;
}
+static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table)
+{
+ return NULL;
+}
+
static inline void unregister_sysctl_table(struct ctl_table_header * table)
{
}
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 1e6df0eb058f..a10a3b1813f3 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -14,7 +14,6 @@
struct tcf_idrinfo {
spinlock_t lock;
struct idr action_idr;
- struct net *net;
};
struct tc_action_ops;
@@ -106,7 +105,7 @@ struct tc_action_net {
static inline
int tc_action_net_init(struct tc_action_net *tn,
- const struct tc_action_ops *ops, struct net *net)
+ const struct tc_action_ops *ops)
{
int err = 0;
@@ -114,7 +113,6 @@ int tc_action_net_init(struct tc_action_net *tn,
if (!tn->idrinfo)
return -ENOMEM;
tn->ops = ops;
- tn->idrinfo->net = net;
spin_lock_init(&tn->idrinfo->lock);
idr_init(&tn->idrinfo->action_idr);
return err;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 70ca2437740e..8826747ef83e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -94,6 +94,7 @@ struct tcf_exts {
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
int nr_actions;
struct tc_action **actions;
+ struct net *net;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
@@ -107,6 +108,7 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
+ exts->net = NULL;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
if (!exts->actions)
@@ -117,6 +119,28 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
return 0;
}
+/* Return false if the netns is being destroyed in cleanup_net(). Callers
+ * need to do the cleanup synchronously in this case, otherwise they may
+ * race with tc_action_net_exit(). Return true for other cases.
+ */
+static inline bool tcf_exts_get_net(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ exts->net = maybe_get_net(exts->net);
+ return exts->net != NULL;
+#else
+ return true;
+#endif
+}
+
+static inline void tcf_exts_put_net(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (exts->net)
+ put_net(exts->net);
+#endif
+}
+
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
struct list_head *actions)
{
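The tcf_exts_get_net()/tcf_exts_put_net() comment above implies a specific caller pattern in the classifiers; a hypothetical sketch of that pattern (struct example_filter and its helpers are illustrations, not part of this hunk):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>

struct example_filter {
	struct tcf_exts exts;
	struct rcu_head rcu;
};

static void __example_filter_free(struct example_filter *f)
{
	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void example_filter_free_rcu(struct rcu_head *head)
{
	struct example_filter *f = container_of(head, struct example_filter, rcu);

	tcf_exts_put_net(&f->exts);	/* drop the ref taken in example_filter_delete() */
	__example_filter_free(f);
}

static void example_filter_delete(struct example_filter *f)
{
	if (tcf_exts_get_net(&f->exts))
		call_rcu(&f->rcu, example_filter_free_rcu);	/* normal, deferred path */
	else
		__example_filter_free(f);	/* netns is being destroyed: free now */
}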
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index feb58d455560..4b9ee3009aa0 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS 10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
diff --git a/include/sound/timer.h b/include/sound/timer.h
index c4d76ff056c6..7ae226ab6990 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -90,6 +90,8 @@ struct snd_timer {
struct list_head ack_list_head;
struct list_head sack_list_head; /* slow ack list head */
struct tasklet_struct task_queue;
+ int max_instances; /* upper limit of timer instances */
+ int num_instances; /* current number of timer instances */
};
struct snd_timer_instance {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6598fb76d2c2..9816590d3ad2 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -829,6 +829,7 @@ struct drm_i915_gem_exec_fence {
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
__u32 flags;
};
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index a92be0f492a9..c1395b5bd432 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -66,6 +66,9 @@
#define XATTR_NAME_SMACKTRANSMUTE XATTR_SECURITY_PREFIX XATTR_SMACK_TRANSMUTE
#define XATTR_NAME_SMACKMMAP XATTR_SECURITY_PREFIX XATTR_SMACK_MMAP
+#define XATTR_APPARMOR_SUFFIX "apparmor"
+#define XATTR_NAME_APPARMOR XATTR_SECURITY_PREFIX XATTR_APPARMOR_SUFFIX
+
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 127e7cfafa55..1e6ae66c6244 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -480,6 +480,7 @@ struct kallsym_iter {
char name[KSYM_NAME_LEN];
char module_name[MODULE_NAME_LEN];
int exported;
+ int show_value;
};
static int get_ksymbol_mod(struct kallsym_iter *iter)
@@ -582,12 +583,15 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
+ unsigned long value;
struct kallsym_iter *iter = m->private;
/* Some debugging symbols have no name. Ignore them. */
if (!iter->name[0])
return 0;
+ value = iter->show_value ? iter->value : 0;
+
if (iter->module_name[0]) {
char type;
@@ -597,10 +601,10 @@ static int s_show(struct seq_file *m, void *p)
*/
type = iter->exported ? toupper(iter->type) :
tolower(iter->type);
- seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
+ seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
type, iter->name, iter->module_name);
} else
- seq_printf(m, "%pK %c %s\n", (void *)iter->value,
+ seq_printf(m, KALLSYM_FMT " %c %s\n", value,
iter->type, iter->name);
return 0;
}
@@ -612,6 +616,40 @@ static const struct seq_operations kallsyms_op = {
.show = s_show
};
+static inline int kallsyms_for_perf(void)
+{
+#ifdef CONFIG_PERF_EVENTS
+ extern int sysctl_perf_event_paranoid;
+ if (sysctl_perf_event_paranoid <= 1)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * We show kallsyms information even to normal users if we've enabled
+ * kernel profiling and are explicitly not paranoid (so kptr_restrict
+ * is clear, and sysctl_perf_event_paranoid isn't set).
+ *
+ * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
+ * block even that).
+ */
+int kallsyms_show_value(void)
+{
+ switch (kptr_restrict) {
+ case 0:
+ if (kallsyms_for_perf())
+ return 1;
+ /* fallthrough */
+ case 1:
+ if (has_capability_noaudit(current, CAP_SYSLOG))
+ return 1;
+ /* fallthrough */
+ default:
+ return 0;
+ }
+}
+
static int kallsyms_open(struct inode *inode, struct file *file)
{
/*
@@ -625,6 +663,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
return -ENOMEM;
reset_iter(iter, 0);
+ iter->show_value = kallsyms_show_value();
return 0;
}
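kallsyms_show_value() is intended as the single policy check for exposing kernel addresses; a hypothetical seq_file user of the helper and the new KALLSYM_FMT format (example_show() is an illustration, not from the patch) could look like:

#include <linux/kallsyms.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	unsigned long addr = (unsigned long)example_show;

	if (!kallsyms_show_value())
		addr = 0;	/* hide kernel addresses from unprivileged readers */

	seq_printf(m, "addr: " KALLSYM_FMT "\n", addr);
	return 0;
}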
diff --git a/kernel/module.c b/kernel/module.c
index de66ec825992..32c2cdaccd93 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -278,6 +278,16 @@ static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
+/*
+ * Expose the sig_enforce kernel cmdline parameter so that other subsystems
+ * can rely on it instead of checking CONFIG_MODULE_SIG_FORCE directly.
+ */
+bool is_module_sig_enforced(void)
+{
+ return sig_enforce;
+}
+EXPORT_SYMBOL(is_module_sig_enforced);
+
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
@@ -1516,7 +1526,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
sattr->mattr.show = module_sect_show;
sattr->mattr.store = NULL;
sattr->mattr.attr.name = sattr->name;
- sattr->mattr.attr.mode = S_IRUGO;
+ sattr->mattr.attr.mode = S_IRUSR;
*(gattr++) = &(sattr++)->mattr.attr;
}
*gattr = NULL;
@@ -4147,6 +4157,7 @@ static int m_show(struct seq_file *m, void *p)
{
struct module *mod = list_entry(p, struct module, list);
char buf[MODULE_FLAGS_BUF_SIZE];
+ unsigned long value;
/* We always ignore unformed modules. */
if (mod->state == MODULE_STATE_UNFORMED)
@@ -4162,7 +4173,8 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
- seq_printf(m, " 0x%pK", mod->core_layout.base);
+ value = m->private ? 0 : (unsigned long)mod->core_layout.base;
+ seq_printf(m, " 0x" KALLSYM_FMT, value);
/* Taints info */
if (mod->taints)
@@ -4184,9 +4196,23 @@ static const struct seq_operations modules_op = {
.show = m_show
};
+/*
+ * This also sets the "private" pointer to non-NULL if the
+ * kernel pointers should be hidden (so you can just test
+ * "m->private" to see if you should keep the values private).
+ *
+ * We use the same logic as for /proc/kallsyms.
+ */
static int modules_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &modules_op);
+ int err = seq_open(file, &modules_op);
+
+ if (!err) {
+ struct seq_file *m = file->private_data;
+ m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+ }
+
+ return err;
}
static const struct file_operations proc_modules_operations = {
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 9209d83ecdcf..ba0da243fdd8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -649,6 +649,7 @@ static int sugov_start(struct cpufreq_policy *policy)
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
memset(sg_cpu, 0, sizeof(*sg_cpu));
+ sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
@@ -714,11 +715,6 @@ struct cpufreq_governor *cpufreq_default_governor(void)
static int __init sugov_register(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(sugov_cpu, cpu).cpu = cpu;
-
return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index efdd72e15794..d390d1be3748 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -60,7 +61,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index fef5d2e114be..1ef0cec38d78 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -228,7 +228,7 @@ next_op:
hdr = 2;
/* Extract a tag from the data */
- if (unlikely(dp >= datalen - 1))
+ if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -274,7 +274,7 @@ next_op:
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
- if (unlikely(dp >= datalen - n))
+ if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
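The rewritten checks avoid unsigned wrap-around: with datalen == 0 the old expression datalen - 1 wraps to SIZE_MAX and "dp >= datalen - 1" never fires, whereas "datalen - dp" cannot wrap because dp never exceeds datalen. A standalone illustration of the two forms (not kernel code):

#include <stdbool.h>
#include <stddef.h>

/* dp <= datalen is an invariant in the decoder. */
static bool old_check(size_t datalen, size_t dp)
{
	return dp >= datalen - 1;	/* datalen == 0: wraps, returns false and misses the overrun */
}

static bool new_check(size_t datalen, size_t dp)
{
	return datalen - dp < 2;	/* datalen == 0, dp == 0: returns true, error is reported */
}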
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c82c61b66e16..d8f0c094b18e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -18,7 +18,9 @@
#include <asm/page.h>
-/*
+/**
+ * DOC: bitmap introduction
+ *
 * bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
diff --git a/lib/crc32.c b/lib/crc32.c
index 6ddc92bc1460..2ef20fe84b69 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -225,7 +225,7 @@ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
}
/**
- * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time
* @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
* @len: The number of bytes. @crc is multiplied by x^(8*@len)
* @polynomial: The modulus used to reduce the result to 32 bits.
diff --git a/lib/crc4.c b/lib/crc4.c
index cf6db46661be..164ed9444cd3 100644
--- a/lib/crc4.c
+++ b/lib/crc4.c
@@ -15,7 +15,7 @@ static const uint8_t crc4_tab[] = {
/**
* crc4 - calculate the 4-bit crc of a value.
- * @crc: starting crc4
+ * @c: starting crc4
* @x: value to checksum
* @bits: number of bits in @x to checksum
*
diff --git a/lib/crc8.c b/lib/crc8.c
index 87b59cafdb83..595a5a75e3cd 100644
--- a/lib/crc8.c
+++ b/lib/crc8.c
@@ -20,11 +20,11 @@
#include <linux/crc8.h>
#include <linux/printk.h>
-/*
+/**
* crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
*
- * table: table to be filled.
- * polynomial: polynomial for which table is to be filled.
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
*/
void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
@@ -42,11 +42,11 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
}
EXPORT_SYMBOL(crc8_populate_msb);
-/*
+/**
* crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
*
- * table: table to be filled.
- * polynomial: polynomial for which table is to be filled.
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
*/
void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
@@ -63,13 +63,13 @@ void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
}
EXPORT_SYMBOL(crc8_populate_lsb);
-/*
+/**
* crc8 - calculate a crc8 over the given input data.
*
- * table: crc table used for calculation.
- * pdata: pointer to data buffer.
- * nbytes: number of bytes in data buffer.
- * crc: previous returned crc8 value.
+ * @table: crc table used for calculation.
+ * @pdata: pointer to data buffer.
+ * @nbytes: number of bytes in data buffer.
+ * @crc: previous returned crc8 value.
*/
u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
{
diff --git a/lib/div64.c b/lib/div64.c
index 58e2a404097e..01c8602bb6ff 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -61,6 +61,12 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
#endif
+/**
+ * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
+ * @dividend: 64bit dividend
+ * @divisor: 32bit divisor
+ * @remainder: 32bit remainder
+ */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
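
The corrected kernel-doc above matches the prototype: the divisor and remainder are 32-bit. As a reminder of the semantics, here is a userspace model (an illustrative sketch, not the kernel implementation) showing that the quotient truncates toward zero and the remainder takes the sign of the dividend:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of div_s64_rem(): quotient truncates toward zero and the
 * remainder takes the sign of the dividend, exactly as C99 division does.
 */
static int64_t model_div_s64_rem(int64_t dividend, int32_t divisor, int32_t *remainder)
{
	*remainder = (int32_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	int32_t rem;
	int64_t quot = model_div_s64_rem(-7, 2, &rem);

	printf("-7 / 2 = %lld rem %d\n", (long long)quot, rem);
	assert(quot == -3 && rem == -1);
	return 0;
}
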
diff --git a/lib/gcd.c b/lib/gcd.c
index 135ee6407a5e..227dea924425 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -13,6 +13,12 @@
#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS)
/* If __ffs is available, the even/odd algorithm benchmarks slower. */
+
+/**
+ * gcd - calculate and return the greatest common divisor of 2 unsigned longs
+ * @a: first value
+ * @b: second value
+ */
unsigned long gcd(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
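
For reference, the function documented above appears to use a binary GCD when an efficient __ffs is available. A self-contained userspace sketch of that technique, using __builtin_ctzl as a stand-in for __ffs (an illustration, not the kernel's exact code):

#include <assert.h>
#include <stdio.h>

/* Userspace sketch of a binary GCD, using __builtin_ctzl where the kernel
 * implementation uses __ffs; illustrative only.
 */
static unsigned long gcd_sketch(unsigned long a, unsigned long b)
{
	unsigned long r = a | b;

	if (!a || !b)
		return r;		/* gcd(x, 0) == x */

	/* The common power of two is carried in the low zero bits of a | b. */
	a >>= __builtin_ctzl(a);
	do {
		b >>= __builtin_ctzl(b);
		if (a > b) {
			unsigned long t = a;
			a = b;
			b = t;
		}
		b -= a;
	} while (b);

	return a << __builtin_ctzl(r);
}

int main(void)
{
	assert(gcd_sketch(12, 18) == 6);
	assert(gcd_sketch(0, 7) == 7);
	assert(gcd_sketch(35, 64) == 1);
	printf("gcd(12, 18) = %lu\n", gcd_sketch(12, 18));
	return 0;
}
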
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 9649579b5b9f..4a72ee4e2ae9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
dev->name);
vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
}
+ if (event == NETDEV_DOWN &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
struct net_device *tmp;
LIST_HEAD(close_list);
- if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
- vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
-
/* Put all VLANs for this dev in the down state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 24656076906d..e140ba49b30a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4864,6 +4864,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
if (!xnet)
return;
+ ipvs_reset(skb);
skb_orphan(skb);
skb->mark = 0;
}
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index e6c06aa349a6..1e2929f4290a 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -133,6 +133,8 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
if (err)
return err;
}
+
+ return 0;
}
for_each_set_bit(port, group, ds->num_ports)
@@ -180,6 +182,8 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
if (err)
return err;
}
+
+ return 0;
}
for_each_set_bit(port, members, ds->num_ports)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5a87a00641d3..b6bb3cdfad09 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -115,7 +115,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
-#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
+#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
@@ -2615,7 +2615,6 @@ void tcp_simple_retransmit(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int mss = tcp_current_mss(sk);
- u32 prior_lost = tp->lost_out;
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
@@ -2632,7 +2631,7 @@ void tcp_simple_retransmit(struct sock *sk)
tcp_clear_retrans_hints_partial(tp);
- if (prior_lost == tp->lost_out)
+ if (!tp->lost_out)
return;
if (tcp_is_reno(tp))
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 11f69bbf9307..b6a2aa1dcf56 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -149,11 +149,19 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
* is freed by GSO engine
*/
if (copy_destructor) {
+ int delta;
+
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
sum_truesize += skb->truesize;
- refcount_add(sum_truesize - gso_skb->truesize,
- &skb->sk->sk_wmem_alloc);
+ delta = sum_truesize - gso_skb->truesize;
+ /* In some pathological cases, delta can be negative.
+ * We need to use either refcount_add() or refcount_sub_and_test().
+ */
+ if (likely(delta >= 0))
+ refcount_add(delta, &skb->sk->sk_wmem_alloc);
+ else
+ WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
}
delta = htonl(oldlen + (skb_tail_pointer(skb) -
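
The hunk above exists because the refcount helpers only accept non-negative amounts; feeding a negative delta to refcount_add() would be interpreted as a huge unsigned value. A userspace model of the add-or-sub decision (mock_refcount_* are stand-ins for the kernel's refcount_t API, and the numbers are made up):

#include <assert.h>
#include <stdio.h>

/* mock_refcount_* model the kernel helpers: amounts are unsigned, so a
 * signed delta has to pick add vs. sub explicitly.
 */
struct mock_refcount { unsigned int val; };

static void mock_refcount_add(unsigned int i, struct mock_refcount *r)
{
	r->val += i;
}

/* Returns 1 if the count dropped to zero (which the caller treats as a bug). */
static int mock_refcount_sub_and_test(unsigned int i, struct mock_refcount *r)
{
	r->val -= i;
	return r->val == 0;
}

static void account_truesize_delta(int delta, struct mock_refcount *wmem)
{
	if (delta >= 0)
		mock_refcount_add(delta, wmem);
	else if (mock_refcount_sub_and_test(-delta, wmem))
		fprintf(stderr, "WARN: wmem_alloc dropped to zero\n");
}

int main(void)
{
	struct mock_refcount wmem = { .val = 1000 };

	account_truesize_delta(256, &wmem);	/* common case: grow */
	account_truesize_delta(-128, &wmem);	/* pathological case: shrink */
	assert(wmem.val == 1128);
	printf("wmem_alloc = %u\n", wmem.val);
	return 0;
}
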
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 4d322c1b7233..e4280b6568b4 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -123,6 +123,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
+ struct iphdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -178,24 +179,17 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel) {
- sk = tunnel->sock;
- sock_hold(sk);
- } else {
- struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
-
- read_lock_bh(&l2tp_ip_lock);
- sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
- inet_iif(skb), tunnel_id);
- if (!sk) {
- read_unlock_bh(&l2tp_ip_lock);
- goto discard;
- }
+ iph = (struct iphdr *)skb_network_header(skb);
- sock_hold(sk);
+ read_lock_bh(&l2tp_ip_lock);
+ sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
+ tunnel_id);
+ if (!sk) {
read_unlock_bh(&l2tp_ip_lock);
+ goto discard;
}
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip_lock);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 88b397c30d86..8bcaa975b432 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -136,6 +136,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
+ struct ipv6hdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -192,24 +193,17 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel) {
- sk = tunnel->sock;
- sock_hold(sk);
- } else {
- struct ipv6hdr *iph = ipv6_hdr(skb);
-
- read_lock_bh(&l2tp_ip6_lock);
- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
- inet6_iif(skb), tunnel_id);
- if (!sk) {
- read_unlock_bh(&l2tp_ip6_lock);
- goto discard;
- }
+ iph = ipv6_hdr(skb);
- sock_hold(sk);
+ read_lock_bh(&l2tp_ip6_lock);
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+ inet6_iif(skb), tunnel_id);
+ if (!sk) {
read_unlock_bh(&l2tp_ip6_lock);
+ goto discard;
}
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip6_lock);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c2f5c13550c0..78418f38464a 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1085,7 +1085,7 @@ static int __init qrtr_proto_init(void)
return 0;
}
-module_init(qrtr_proto_init);
+postcore_initcall(qrtr_proto_init);
static void __exit qrtr_proto_fini(void)
{
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 9722bf839d9d..b4e421aa9727 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -410,14 +410,14 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
break;
}
- /* XXX when can this fail? */
- ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
- rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
+ rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
(long) ib_sg_dma_address(
ic->i_cm_id->device,
- &recv->r_frag->f_sg),
- ret);
+ &recv->r_frag->f_sg));
+
+ /* XXX when can this fail? */
+ ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
"%pI4 returned %d, disconnecting and "
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index ca2ff0b3123f..8f2c63514956 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,6 @@ static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
spin_lock_bh(&idrinfo->lock);
idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
spin_unlock_bh(&idrinfo->lock);
- put_net(idrinfo->net);
gen_kill_estimator(&p->tcfa_rate_est);
free_tcf(p);
}
@@ -337,7 +336,6 @@ err3:
p->idrinfo = idrinfo;
p->ops = ops;
INIT_LIST_HEAD(&p->list);
- get_net(idrinfo->net);
*a = p;
return 0;
}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 9bce8cc84cbb..c0c707eb2c96 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -398,7 +398,7 @@ static __net_init int bpf_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
- return tc_action_net_init(tn, &act_bpf_ops, net);
+ return tc_action_net_init(tn, &act_bpf_ops);
}
static void __net_exit bpf_exit_net(struct net *net)
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 34e52d01a5dd..10b7a8855a6c 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -206,7 +206,7 @@ static __net_init int connmark_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
- return tc_action_net_init(tn, &act_connmark_ops, net);
+ return tc_action_net_init(tn, &act_connmark_ops);
}
static void __net_exit connmark_exit_net(struct net *net)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 35171df2ebef..1c40caadcff9 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -626,7 +626,7 @@ static __net_init int csum_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
- return tc_action_net_init(tn, &act_csum_ops, net);
+ return tc_action_net_init(tn, &act_csum_ops);
}
static void __net_exit csum_exit_net(struct net *net)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index ef7f7f39d26d..e29a48ef7fc3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -232,7 +232,7 @@ static __net_init int gact_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
- return tc_action_net_init(tn, &act_gact_ops, net);
+ return tc_action_net_init(tn, &act_gact_ops);
}
static void __net_exit gact_exit_net(struct net *net)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index f65e4b5058e0..8ccd35825b6b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -818,7 +818,7 @@ static __net_init int ife_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
- return tc_action_net_init(tn, &act_ife_ops, net);
+ return tc_action_net_init(tn, &act_ife_ops);
}
static void __net_exit ife_exit_net(struct net *net)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index dbdf3b2470d5..d9e399a7e3d5 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -334,7 +334,7 @@ static __net_init int ipt_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, ipt_net_id);
- return tc_action_net_init(tn, &act_ipt_ops, net);
+ return tc_action_net_init(tn, &act_ipt_ops);
}
static void __net_exit ipt_exit_net(struct net *net)
@@ -384,7 +384,7 @@ static __net_init int xt_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, xt_net_id);
- return tc_action_net_init(tn, &act_xt_ops, net);
+ return tc_action_net_init(tn, &act_xt_ops);
}
static void __net_exit xt_exit_net(struct net *net)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 84759cfd5a33..416627c66f08 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -343,7 +343,7 @@ static __net_init int mirred_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
- return tc_action_net_init(tn, &act_mirred_ops, net);
+ return tc_action_net_init(tn, &act_mirred_ops);
}
static void __net_exit mirred_exit_net(struct net *net)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 7eeaaf9217b6..c365d01b99c8 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -307,7 +307,7 @@ static __net_init int nat_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
- return tc_action_net_init(tn, &act_nat_ops, net);
+ return tc_action_net_init(tn, &act_nat_ops);
}
static void __net_exit nat_exit_net(struct net *net)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b3d82c334a5f..491fe5deb09e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -450,7 +450,7 @@ static __net_init int pedit_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
- return tc_action_net_init(tn, &act_pedit_ops, net);
+ return tc_action_net_init(tn, &act_pedit_ops);
}
static void __net_exit pedit_exit_net(struct net *net)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 9ec42b26e4b9..3bb2ebf9e9ae 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -331,7 +331,7 @@ static __net_init int police_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, police_net_id);
- return tc_action_net_init(tn, &act_police_ops, net);
+ return tc_action_net_init(tn, &act_police_ops);
}
static void __net_exit police_exit_net(struct net *net)
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index e69a1e3a39bf..8b5abcd2f32f 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -240,7 +240,7 @@ static __net_init int sample_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
- return tc_action_net_init(tn, &act_sample_ops, net);
+ return tc_action_net_init(tn, &act_sample_ops);
}
static void __net_exit sample_exit_net(struct net *net)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index a8d0ea95f894..e7b57e5071a3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -201,7 +201,7 @@ static __net_init int simp_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
- return tc_action_net_init(tn, &act_simp_ops, net);
+ return tc_action_net_init(tn, &act_simp_ops);
}
static void __net_exit simp_exit_net(struct net *net)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index fbac62472e09..59949d61f20d 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -238,7 +238,7 @@ static __net_init int skbedit_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
- return tc_action_net_init(tn, &act_skbedit_ops, net);
+ return tc_action_net_init(tn, &act_skbedit_ops);
}
static void __net_exit skbedit_exit_net(struct net *net)
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 8e12d8897d2f..b642ad3d39dd 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -263,7 +263,7 @@ static __net_init int skbmod_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
- return tc_action_net_init(tn, &act_skbmod_ops, net);
+ return tc_action_net_init(tn, &act_skbmod_ops);
}
static void __net_exit skbmod_exit_net(struct net *net)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index c33faa373cf2..30c96274c638 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -322,7 +322,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
- return tc_action_net_init(tn, &act_tunnel_key_ops, net);
+ return tc_action_net_init(tn, &act_tunnel_key_ops);
}
static void __net_exit tunnel_key_exit_net(struct net *net)
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 115fc33cc6d8..16eb067a8d8f 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -269,7 +269,7 @@ static __net_init int vlan_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
- return tc_action_net_init(tn, &act_vlan_ops, net);
+ return tc_action_net_init(tn, &act_vlan_ops);
}
static void __net_exit vlan_exit_net(struct net *net)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b2d310745487..ecbb019efcbd 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -927,6 +927,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
exts->actions[i++] = act;
exts->nr_actions = i;
}
+ exts->net = net;
}
#else
if ((exts->action && tb[exts->action]) ||
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index f177649a2419..e43c56d5b96a 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -85,16 +85,21 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
+static void __basic_delete_filter(struct basic_filter *f)
+{
+ tcf_exts_destroy(&f->exts);
+ tcf_em_tree_destroy(&f->ematches);
+ tcf_exts_put_net(&f->exts);
+ kfree(f);
+}
+
static void basic_delete_filter_work(struct work_struct *work)
{
struct basic_filter *f = container_of(work, struct basic_filter, work);
rtnl_lock();
- tcf_exts_destroy(&f->exts);
- tcf_em_tree_destroy(&f->ematches);
+ __basic_delete_filter(f);
rtnl_unlock();
-
- kfree(f);
}
static void basic_delete_filter(struct rcu_head *head)
@@ -113,7 +118,10 @@ static void basic_destroy(struct tcf_proto *tp)
list_for_each_entry_safe(f, n, &head->flist, link) {
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
- call_rcu(&f->rcu, basic_delete_filter);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, basic_delete_filter);
+ else
+ __basic_delete_filter(f);
}
kfree_rcu(head, rcu);
}
@@ -125,6 +133,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last)
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
+ tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, basic_delete_filter);
*last = list_empty(&head->flist);
return 0;
@@ -219,6 +228,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (fold) {
list_replace_rcu(&fold->link, &fnew->link);
tcf_unbind_filter(tp, &fold->res);
+ tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, basic_delete_filter);
} else {
list_add_rcu(&fnew->link, &head->flist);
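
This cls_basic change is the first of a series that all follow the same idiom: factor the actual free into a helper, then only defer it through call_rcu() when a reference on the netns can still be taken; if the netns is already being torn down, free synchronously. A userspace model of that decision as I read it (mock_* names are stand-ins, not kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* mock_maybe_get_net() models tcf_exts_get_net(): take a reference only if
 * the netns refcount has not already dropped to zero.
 */
struct mock_net { unsigned int refcnt; };

static bool mock_maybe_get_net(struct mock_net *net)
{
	if (!net->refcnt)
		return false;	/* netns already being torn down */
	net->refcnt++;
	return true;
}

struct mock_filter { struct mock_net *net; };

static void destroy_filter(struct mock_filter *f)
{
	if (mock_maybe_get_net(f->net))
		printf("deferred free via call_rcu()\n");
	else
		printf("netns gone: freeing synchronously\n");
}

int main(void)
{
	struct mock_net live = { .refcnt = 1 }, dead = { .refcnt = 0 };
	struct mock_filter f1 = { .net = &live }, f2 = { .net = &dead };

	destroy_filter(&f1);
	destroy_filter(&f2);
	assert(live.refcnt == 2);
	return 0;
}
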
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 037a3ae86829..990eb4d91d54 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -249,6 +249,7 @@ static int cls_bpf_init(struct tcf_proto *tp)
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
tcf_exts_destroy(&prog->exts);
+ tcf_exts_put_net(&prog->exts);
if (cls_bpf_is_ebpf(prog))
bpf_prog_put(prog->filter);
@@ -282,7 +283,10 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
- call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
+ if (tcf_exts_get_net(&prog->exts))
+ call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
+ else
+ __cls_bpf_delete_prog(prog);
}
static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
@@ -516,6 +520,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (oldprog) {
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
+ tcf_exts_get_net(&oldprog->exts);
call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
} else {
list_add_rcu(&prog->link, &head->plist);
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index a97e069bee89..309d5899265f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -60,15 +60,21 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
+static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
+{
+ tcf_exts_destroy(&head->exts);
+ tcf_em_tree_destroy(&head->ematches);
+ tcf_exts_put_net(&head->exts);
+ kfree(head);
+}
+
static void cls_cgroup_destroy_work(struct work_struct *work)
{
struct cls_cgroup_head *head = container_of(work,
struct cls_cgroup_head,
work);
rtnl_lock();
- tcf_exts_destroy(&head->exts);
- tcf_em_tree_destroy(&head->ematches);
- kfree(head);
+ __cls_cgroup_destroy(head);
rtnl_unlock();
}
@@ -124,8 +130,10 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
goto errout;
rcu_assign_pointer(tp->root, new);
- if (head)
+ if (head) {
+ tcf_exts_get_net(&head->exts);
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+ }
return 0;
errout:
tcf_exts_destroy(&new->exts);
@@ -138,8 +146,12 @@ static void cls_cgroup_destroy(struct tcf_proto *tp)
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
/* Head can still be NULL due to cls_cgroup_init(). */
- if (head)
- call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+ if (head) {
+ if (tcf_exts_get_net(&head->exts))
+ call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+ else
+ __cls_cgroup_destroy(head);
+ }
}
static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 67f3a2af6aab..85f765cff697 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -372,15 +372,21 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
-static void flow_destroy_filter_work(struct work_struct *work)
+static void __flow_destroy_filter(struct flow_filter *f)
{
- struct flow_filter *f = container_of(work, struct flow_filter, work);
-
- rtnl_lock();
del_timer_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
+ tcf_exts_put_net(&f->exts);
kfree(f);
+}
+
+static void flow_destroy_filter_work(struct work_struct *work)
+{
+ struct flow_filter *f = container_of(work, struct flow_filter, work);
+
+ rtnl_lock();
+ __flow_destroy_filter(f);
rtnl_unlock();
}
@@ -552,8 +558,10 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
*arg = fnew;
- if (fold)
+ if (fold) {
+ tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, flow_destroy_filter);
+ }
return 0;
err2:
@@ -570,6 +578,7 @@ static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
struct flow_filter *f = arg;
list_del_rcu(&f->list);
+ tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, flow_destroy_filter);
*last = list_empty(&head->filters);
return 0;
@@ -594,7 +603,10 @@ static void flow_destroy(struct tcf_proto *tp)
list_for_each_entry_safe(f, next, &head->filters, list) {
list_del_rcu(&f->list);
- call_rcu(&f->rcu, flow_destroy_filter);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, flow_destroy_filter);
+ else
+ __flow_destroy_filter(f);
}
kfree_rcu(head, rcu);
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 5b5722c8b32c..7a838d1c1c00 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -218,13 +218,19 @@ static int fl_init(struct tcf_proto *tp)
return 0;
}
+static void __fl_destroy_filter(struct cls_fl_filter *f)
+{
+ tcf_exts_destroy(&f->exts);
+ tcf_exts_put_net(&f->exts);
+ kfree(f);
+}
+
static void fl_destroy_filter_work(struct work_struct *work)
{
struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
rtnl_lock();
- tcf_exts_destroy(&f->exts);
- kfree(f);
+ __fl_destroy_filter(f);
rtnl_unlock();
}
@@ -318,7 +324,10 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f);
tcf_unbind_filter(tp, &f->res);
- call_rcu(&f->rcu, fl_destroy_filter);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, fl_destroy_filter);
+ else
+ __fl_destroy_filter(f);
}
static void fl_destroy_sleepable(struct work_struct *work)
@@ -988,6 +997,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
+ tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, fl_destroy_filter);
} else {
list_add_tail_rcu(&fnew->list, &head->filters);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 99183b8621ec..7f45e5ab8afc 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -122,13 +122,19 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
+static void __fw_delete_filter(struct fw_filter *f)
+{
+ tcf_exts_destroy(&f->exts);
+ tcf_exts_put_net(&f->exts);
+ kfree(f);
+}
+
static void fw_delete_filter_work(struct work_struct *work)
{
struct fw_filter *f = container_of(work, struct fw_filter, work);
rtnl_lock();
- tcf_exts_destroy(&f->exts);
- kfree(f);
+ __fw_delete_filter(f);
rtnl_unlock();
}
@@ -154,7 +160,10 @@ static void fw_destroy(struct tcf_proto *tp)
RCU_INIT_POINTER(head->ht[h],
rtnl_dereference(f->next));
tcf_unbind_filter(tp, &f->res);
- call_rcu(&f->rcu, fw_delete_filter);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, fw_delete_filter);
+ else
+ __fw_delete_filter(f);
}
}
kfree_rcu(head, rcu);
@@ -179,6 +188,7 @@ static int fw_delete(struct tcf_proto *tp, void *arg, bool *last)
if (pfp == f) {
RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
tcf_unbind_filter(tp, &f->res);
+ tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, fw_delete_filter);
ret = 0;
break;
@@ -299,6 +309,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
rcu_assign_pointer(*fp, fnew);
tcf_unbind_filter(tp, &f->res);
+ tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, fw_delete_filter);
*arg = fnew;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index c33f711b9019..3684153cd8a9 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -44,13 +44,19 @@ static int mall_init(struct tcf_proto *tp)
return 0;
}
+static void __mall_destroy(struct cls_mall_head *head)
+{
+ tcf_exts_destroy(&head->exts);
+ tcf_exts_put_net(&head->exts);
+ kfree(head);
+}
+
static void mall_destroy_work(struct work_struct *work)
{
struct cls_mall_head *head = container_of(work, struct cls_mall_head,
work);
rtnl_lock();
- tcf_exts_destroy(&head->exts);
- kfree(head);
+ __mall_destroy(head);
rtnl_unlock();
}
@@ -109,7 +115,10 @@ static void mall_destroy(struct tcf_proto *tp)
if (tc_should_offload(dev, head->flags))
mall_destroy_hw_filter(tp, head, (unsigned long) head);
- call_rcu(&head->rcu, mall_destroy_rcu);
+ if (tcf_exts_get_net(&head->exts))
+ call_rcu(&head->rcu, mall_destroy_rcu);
+ else
+ __mall_destroy(head);
}
static void *mall_get(struct tcf_proto *tp, u32 handle)
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 4b14ccd8b8f2..ac9a5b8825b9 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -257,13 +257,19 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
+static void __route4_delete_filter(struct route4_filter *f)
+{
+ tcf_exts_destroy(&f->exts);
+ tcf_exts_put_net(&f->exts);
+ kfree(f);
+}
+
static void route4_delete_filter_work(struct work_struct *work)
{
struct route4_filter *f = container_of(work, struct route4_filter, work);
rtnl_lock();
- tcf_exts_destroy(&f->exts);
- kfree(f);
+ __route4_delete_filter(f);
rtnl_unlock();
}
@@ -297,7 +303,10 @@ static void route4_destroy(struct tcf_proto *tp)
next = rtnl_dereference(f->next);
RCU_INIT_POINTER(b->ht[h2], next);
tcf_unbind_filter(tp, &f->res);
- call_rcu(&f->rcu, route4_delete_filter);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, route4_delete_filter);
+ else
+ __route4_delete_filter(f);
}
}
RCU_INIT_POINTER(head->table[h1], NULL);
@@ -338,6 +347,7 @@ static int route4_delete(struct tcf_proto *tp, void *arg, bool *last)
/* Delete it */
tcf_unbind_filter(tp, &f->res);
+ tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, route4_delete_filter);
/* Strip RTNL protected tree */
@@ -541,6 +551,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
*arg = f;
if (fold) {
tcf_unbind_filter(tp, &fold->res);
+ tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, route4_delete_filter);
}
return 0;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index bdbc541787f8..cf325625c99d 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -285,13 +285,19 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
+static void __rsvp_delete_filter(struct rsvp_filter *f)
+{
+ tcf_exts_destroy(&f->exts);
+ tcf_exts_put_net(&f->exts);
+ kfree(f);
+}
+
static void rsvp_delete_filter_work(struct work_struct *work)
{
struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
rtnl_lock();
- tcf_exts_destroy(&f->exts);
- kfree(f);
+ __rsvp_delete_filter(f);
rtnl_unlock();
}
@@ -310,7 +316,10 @@ static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
* grace period, since converted-to-rcu actions are relying on that
* in cleanup() callback
*/
- call_rcu(&f->rcu, rsvp_delete_filter_rcu);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, rsvp_delete_filter_rcu);
+ else
+ __rsvp_delete_filter(f);
}
static void rsvp_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index beaa95e09c25..a76937ee0b2d 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -139,13 +139,19 @@ static int tcindex_init(struct tcf_proto *tp)
return 0;
}
+static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
+{
+ tcf_exts_destroy(&r->exts);
+ tcf_exts_put_net(&r->exts);
+}
+
static void tcindex_destroy_rexts_work(struct work_struct *work)
{
struct tcindex_filter_result *r;
r = container_of(work, struct tcindex_filter_result, work);
rtnl_lock();
- tcf_exts_destroy(&r->exts);
+ __tcindex_destroy_rexts(r);
rtnl_unlock();
}
@@ -158,14 +164,20 @@ static void tcindex_destroy_rexts(struct rcu_head *head)
tcf_queue_work(&r->work);
}
+static void __tcindex_destroy_fexts(struct tcindex_filter *f)
+{
+ tcf_exts_destroy(&f->result.exts);
+ tcf_exts_put_net(&f->result.exts);
+ kfree(f);
+}
+
static void tcindex_destroy_fexts_work(struct work_struct *work)
{
struct tcindex_filter *f = container_of(work, struct tcindex_filter,
work);
rtnl_lock();
- tcf_exts_destroy(&f->result.exts);
- kfree(f);
+ __tcindex_destroy_fexts(f);
rtnl_unlock();
}
@@ -210,10 +222,17 @@ found:
* grace period, since converted-to-rcu actions are relying on that
* in cleanup() callback
*/
- if (f)
- call_rcu(&f->rcu, tcindex_destroy_fexts);
- else
- call_rcu(&r->rcu, tcindex_destroy_rexts);
+ if (f) {
+ if (tcf_exts_get_net(&f->result.exts))
+ call_rcu(&f->rcu, tcindex_destroy_fexts);
+ else
+ __tcindex_destroy_fexts(f);
+ } else {
+ if (tcf_exts_get_net(&r->exts))
+ call_rcu(&r->rcu, tcindex_destroy_rexts);
+ else
+ __tcindex_destroy_rexts(r);
+ }
*last = false;
return 0;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index dadd1b344497..b58eccb21f03 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -399,6 +399,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
bool free_pf)
{
tcf_exts_destroy(&n->exts);
+ tcf_exts_put_net(&n->exts);
if (n->ht_down)
n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
@@ -476,6 +477,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
RCU_INIT_POINTER(*kp, key->next);
tcf_unbind_filter(tp, &key->res);
+ tcf_exts_get_net(&key->exts);
call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
return 0;
}
@@ -588,7 +590,10 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
rtnl_dereference(n->next));
tcf_unbind_filter(tp, &n->res);
u32_remove_hw_knode(tp, n->handle);
- call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
+ if (tcf_exts_get_net(&n->exts))
+ call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
+ else
+ u32_destroy_key(n->tp, n, true);
}
}
}
@@ -949,6 +954,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
+ tcf_exts_get_net(&n->exts);
call_rcu(&n->rcu, u32_delete_key_rcu);
return 0;
}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 82d20ee34581..347ab31574d5 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -266,8 +266,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto lock;
}
- daddr = (xfrm_address_t *)(skb_network_header(skb) +
- XFRM_SPI_SKB_CB(skb)->daddroff);
family = XFRM_SPI_SKB_CB(skb)->family;
/* if tunnel is present override skb->mark value with tunnel i_key */
@@ -294,6 +292,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop;
}
+ daddr = (xfrm_address_t *)(skb_network_header(skb) +
+ XFRM_SPI_SKB_CB(skb)->daddroff);
do {
if (skb->sp->len == XFRM_MAX_DEPTH) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8cafb3c0a4ac..6eb228a70131 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1361,36 +1361,29 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
struct net *net = xp_net(policy);
int nx;
int i, error;
- xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
- xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
xfrm_address_t tmp;
for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
struct xfrm_state *x;
- xfrm_address_t *remote = daddr;
- xfrm_address_t *local = saddr;
+ xfrm_address_t *local;
+ xfrm_address_t *remote;
struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
- if (tmpl->mode == XFRM_MODE_TUNNEL ||
- tmpl->mode == XFRM_MODE_BEET) {
- remote = &tmpl->id.daddr;
- local = &tmpl->saddr;
- if (xfrm_addr_any(local, tmpl->encap_family)) {
- error = xfrm_get_saddr(net, fl->flowi_oif,
- &tmp, remote,
- tmpl->encap_family, 0);
- if (error)
- goto fail;
- local = &tmp;
- }
+ remote = &tmpl->id.daddr;
+ local = &tmpl->saddr;
+ if (xfrm_addr_any(local, tmpl->encap_family)) {
+ error = xfrm_get_saddr(net, fl->flowi_oif,
+ &tmp, remote,
+ tmpl->encap_family, 0);
+ if (error)
+ goto fail;
+ local = &tmp;
}
x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
if (x && x->km.state == XFRM_STATE_VALID) {
xfrm[nx++] = x;
- daddr = remote;
- saddr = local;
continue;
}
if (x) {
@@ -1787,19 +1780,23 @@ void xfrm_policy_cache_flush(void)
put_online_cpus();
}
-static bool xfrm_pol_dead(struct xfrm_dst *xdst)
+static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
+ struct xfrm_state * const xfrm[],
+ int num)
{
- unsigned int num_pols = xdst->num_pols;
- unsigned int pol_dead = 0, i;
+ const struct dst_entry *dst = &xdst->u.dst;
+ int i;
- for (i = 0; i < num_pols; i++)
- pol_dead |= xdst->pols[i]->walk.dead;
+ if (xdst->num_xfrms != num)
+ return false;
- /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */
- if (pol_dead)
- xdst->u.dst.obsolete = DST_OBSOLETE_DEAD;
+ for (i = 0; i < num; i++) {
+ if (!dst || dst->xfrm != xfrm[i])
+ return false;
+ dst = dst->child;
+ }
- return pol_dead;
+ return xfrm_bundle_ok(xdst);
}
static struct xfrm_dst *
@@ -1813,26 +1810,28 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
struct dst_entry *dst;
int err;
+ /* Try to instantiate a bundle */
+ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
+ if (err <= 0) {
+ if (err != 0 && err != -EAGAIN)
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ return ERR_PTR(err);
+ }
+
xdst = this_cpu_read(xfrm_last_dst);
if (xdst &&
xdst->u.dst.dev == dst_orig->dev &&
xdst->num_pols == num_pols &&
- !xfrm_pol_dead(xdst) &&
memcmp(xdst->pols, pols,
sizeof(struct xfrm_policy *) * num_pols) == 0 &&
- xfrm_bundle_ok(xdst)) {
+ xfrm_xdst_can_reuse(xdst, xfrm, err)) {
dst_hold(&xdst->u.dst);
+ while (err > 0)
+ xfrm_state_put(xfrm[--err]);
return xdst;
}
old = xdst;
- /* Try to instantiate a bundle */
- err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
- if (err <= 0) {
- if (err != 0 && err != -EAGAIN)
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
- return ERR_PTR(err);
- }
dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
if (IS_ERR(dst)) {
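
The new xfrm_xdst_can_reuse() walks the cached bundle's chain of dst entries and only reuses it when each level still references exactly the xfrm states that were just resolved, in order. A userspace sketch of that comparison (mock_dst/mock_state are stand-ins for dst_entry/xfrm_state; the num_xfrms and xfrm_bundle_ok() parts are omitted):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct mock_state { int id; };

struct mock_dst {
	struct mock_dst *child;
	struct mock_state *xfrm;
};

/* True if the bundle's dst chain references exactly these states, in order. */
static bool bundle_matches(const struct mock_dst *dst,
			   struct mock_state *states[], int num)
{
	int i;

	for (i = 0; i < num; i++) {
		if (!dst || dst->xfrm != states[i])
			return false;
		dst = dst->child;
	}
	return true;
}

int main(void)
{
	struct mock_state s0 = { 0 }, s1 = { 1 }, other = { 2 };
	struct mock_dst inner = { .child = NULL, .xfrm = &s1 };
	struct mock_dst outer = { .child = &inner, .xfrm = &s0 };
	struct mock_state *want[] = { &s0, &s1 };
	struct mock_state *differ[] = { &s0, &other };

	assert(bundle_matches(&outer, want, 2));
	assert(!bundle_matches(&outer, differ, 2));
	return 0;
}
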
diff --git a/samples/connector/cn_test.c b/samples/connector/cn_test.c
index d12cc944b696..95cd06f4ec1e 100644
--- a/samples/connector/cn_test.c
+++ b/samples/connector/cn_test.c
@@ -125,12 +125,12 @@ static int cn_test_want_notify(void)
#endif
static u32 cn_test_timer_counter;
-static void cn_test_timer_func(unsigned long __data)
+static void cn_test_timer_func(struct timer_list *unused)
{
struct cn_msg *m;
char data[32];
- pr_debug("%s: timer fired with data %lu\n", __func__, __data);
+ pr_debug("%s: timer fired\n", __func__);
m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
if (m) {
@@ -168,7 +168,7 @@ static int cn_test_init(void)
goto err_out;
}
- setup_timer(&cn_test_timer, cn_test_timer_func, 0);
+ timer_setup(&cn_test_timer, cn_test_timer_func, 0);
mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
pr_info("initialized with id={%u.%u}\n",
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
new file mode 100755
index 000000000000..bc1659900e89
--- /dev/null
+++ b/scripts/documentation-file-ref-check
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Treewide grep for references to files under Documentation, and report
+# non-existent files on stderr.
+
+for f in $(git ls-files); do
+ for ref in $(grep -ho "Documentation/[A-Za-z0-9_.,~/*+-]*" "$f"); do
+ # presume trailing . and , are not part of the name
+ ref=${ref%%[.,]}
+
+ # use ls to handle wildcards
+ if ! ls $ref >/dev/null 2>&1; then
+ echo "$f: $ref" >&2
+ fi
+ done
+done
diff --git a/scripts/find-unused-docs.sh b/scripts/find-unused-docs.sh
new file mode 100755
index 000000000000..3f46f8977dc4
--- /dev/null
+++ b/scripts/find-unused-docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# (c) 2017, Jonathan Corbet <corbet@lwn.net>
+# sayli karnik <karniksayli1995@gmail.com>
+#
+# This script detects files with kernel-doc comments for exported functions
+# that are not included in documentation.
+#
+# usage: Run 'scripts/find-unused-docs.sh directory' from top level of kernel
+# tree.
+#
+# example: $ scripts/find-unused-docs.sh drivers/scsi
+#
+# Licensed under the terms of the GNU GPL License
+
+if ! [ -d "Documentation" ]; then
+ echo "Run from top level of kernel tree"
+ exit 1
+fi
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: scripts/find-unused-docs.sh directory"
+ exit 1
+fi
+
+if ! [ -d "$1" ]; then
+ echo "Directory $1 doesn't exist"
+ exit 1
+fi
+
+cd "$( dirname "${BASH_SOURCE[0]}" )"
+cd ..
+
+cd Documentation/
+
+echo "The following files contain kerneldoc comments for exported functions \
+that are not used in the formatted documentation"
+
+# FILES INCLUDED
+
+files_included=($(grep -rHR ".. kernel-doc" --include \*.rst | cut -d " " -f 3))
+
+declare -A FILES_INCLUDED
+
+for each in "${files_included[@]}"; do
+ FILES_INCLUDED[$each]="$each"
+ done
+
+cd ..
+
+# FILES NOT INCLUDED
+
+for file in `find $1 -name '*.c'`; do
+
+ if [[ ${FILES_INCLUDED[$file]+_} ]]; then
+ continue;
+ fi
+ str=$(scripts/kernel-doc -text -export "$file" 2>/dev/null)
+ if [[ -n "$str" ]]; then
+ echo "$file"
+ fi
+ done
+
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9d3eafea58f0..67d051edd615 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2168,7 +2168,7 @@ sub dump_struct($$) {
my $nested;
if ($x =~ /(struct|union)\s+(\w+)\s*{(.*)}/) {
- #my $decl_type = $1;
+ my $decl_type = $1;
$declaration_name = $2;
my $members = $3;
@@ -2194,7 +2194,7 @@ sub dump_struct($$) {
$members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
create_parameterlist($members, ';', $file);
- check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
+ check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual, $nested);
output_declaration($declaration_name,
'struct',
@@ -2226,6 +2226,8 @@ sub dump_enum($$) {
if ($x =~ /enum\s+(\w+)\s*{(.*)}/) {
$declaration_name = $1;
my $members = $2;
+ my %_members;
+
$members =~ s/\s+$//;
foreach my $arg (split ',', $members) {
@@ -2236,9 +2238,16 @@ sub dump_enum($$) {
print STDERR "${file}:$.: warning: Enum value '$arg' ".
"not described in enum '$declaration_name'\n";
}
-
+ $_members{$arg} = 1;
}
+ while (my ($k, $v) = each %parameterdescs) {
+ if (!exists($_members{$k})) {
+ print STDERR "${file}:$.: warning: Excess enum value " .
+ "'$k' description in '$declaration_name'\n";
+ }
+ }
+
output_declaration($declaration_name,
'enum',
{'enum' => $declaration_name,
@@ -2506,7 +2515,7 @@ sub check_sections($$$$$$) {
} else {
if ($nested !~ m/\Q$sects[$sx]\E/) {
print STDERR "${file}:$.: warning: " .
- "Excess struct/union/enum/typedef member " .
+ "Excess $decl_type member " .
"'$sects[$sx]' " .
"description in '$decl_name'\n";
++$warnings;
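
With the new loop over %parameterdescs, kernel-doc now also flags documented enum values that no longer exist in the enum body. A made-up example of a comment that would trigger the new "Excess enum value" warning (all names hypothetical):

/**
 * enum widget_state - lifecycle states of a widget
 * @WIDGET_IDLE:    nothing queued
 * @WIDGET_RUNNING: work in progress
 * @WIDGET_STALE:   described here but removed from the enum below,
 *                  so kernel-doc now warns about an excess value
 */
enum widget_state {
	WIDGET_IDLE,
	WIDGET_RUNNING,
};
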
diff --git a/scripts/leaking_addresses.pl b/scripts/leaking_addresses.pl
new file mode 100755
index 000000000000..2977371b2956
--- /dev/null
+++ b/scripts/leaking_addresses.pl
@@ -0,0 +1,305 @@
+#!/usr/bin/env perl
+#
+# (c) 2017 Tobin C. Harding <me@tobin.cc>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# leaking_addresses.pl: Scan 64 bit kernel for potential leaking addresses.
+# - Scans dmesg output.
+# - Walks directory tree and parses each file (for each directory in @DIRS).
+#
+# You can configure the behaviour of the script:
+#
+# - By adding paths for directories you do not want to walk;
+# absolute paths: @skip_walk_dirs_abs
+# directory names: @skip_walk_dirs_any
+#
+# - By adding paths for files you do not want to parse;
+# absolute paths: @skip_parse_files_abs
+# file names: @skip_parse_files_any
+#
+# The use of @skip_xxx_xxx_any causes files to be skipped wherever they occur.
+# For example, adding 'fd' to @skip_walk_dirs_any causes the fd/ directory to be
+# skipped for all PID sub-directories of /proc.
+#
+# The same thing can be achieved by passing paths to the --dont-walk and
+# --dont-parse command line options. If absolute paths are supplied to these
+# options, they are appended to the @skip_xxx_xxx_abs arrays. If file names are
+# supplied, they are appended to the @skip_xxx_xxx_any arrays.
+#
+# Use --debug to output each path before it is parsed; this is useful for
+# finding files that cause the script to choke.
+#
+# You may like to set kptr_restrict=2 before running the script
+# (see Documentation/sysctl/kernel.txt).
+
+use warnings;
+use strict;
+use POSIX;
+use File::Basename;
+use File::Spec;
+use Cwd 'abs_path';
+use Term::ANSIColor qw(:constants);
+use Getopt::Long qw(:config no_auto_abbrev);
+
+my $P = $0;
+my $V = '0.01';
+
+# Directories to scan.
+my @DIRS = ('/proc', '/sys');
+
+# Command line options.
+my $help = 0;
+my $debug = 0;
+my @dont_walk = ();
+my @dont_parse = ();
+
+# Do not parse these files (absolute path).
+my @skip_parse_files_abs = ('/proc/kmsg',
+ '/proc/kcore',
+ '/proc/fs/ext4/sdb1/mb_groups',
+ '/proc/1/fd/3',
+ '/sys/kernel/debug/tracing/trace_pipe',
+ '/sys/kernel/security/apparmor/revision');
+
+# Do not parse these files under any subdirectory.
+my @skip_parse_files_any = ('0',
+ '1',
+ '2',
+ 'pagemap',
+ 'events',
+ 'access',
+ 'registers',
+ 'snapshot_raw',
+ 'trace_pipe_raw',
+ 'ptmx',
+ 'trace_pipe');
+
+# Do not walk these directories (absolute path).
+my @skip_walk_dirs_abs = ();
+
+# Do not walk these directories under any subdirectory.
+my @skip_walk_dirs_any = ('self',
+ 'thread-self',
+ 'cwd',
+ 'fd',
+ 'stderr',
+ 'stdin',
+ 'stdout');
+
+sub help
+{
+ my ($exitcode) = @_;
+
+ print << "EOM";
+Usage: $P [OPTIONS]
+Version: $V
+
+Options:
+
+ --dont-walk=<dir> Don't walk tree starting at <dir>.
+ --dont-parse=<file> Don't parse <file>.
+ -d, --debug Display debugging output.
+ -h, --help, --version Display this help and exit.
+
+If an absolute path is passed to --dont-walk or --dont-parse then this path is
+skipped. If a single filename is passed then this file/directory will be
+skipped when it appears under any subdirectory.
+
+Example:
+
+ # Just scan dmesg output.
+ scripts/leaking_addresses.pl --dont-walk /proc --dont-walk /sys
+
+Scans the running (64 bit) kernel for potential leaking addresses.
+
+EOM
+ exit($exitcode);
+}
+
+GetOptions(
+ 'dont-walk=s' => \@dont_walk,
+ 'dont-parse=s' => \@dont_parse,
+ 'd|debug' => \$debug,
+ 'h|help' => \$help,
+ 'version' => \$help
+) or help(1);
+
+help(0) if ($help);
+
+push_to_global();
+
+parse_dmesg();
+walk(@DIRS);
+
+exit 0;
+
+sub debug_arrays
+{
+ print 'dirs_any: ' . join(", ", @skip_walk_dirs_any) . "\n";
+ print 'dirs_abs: ' . join(", ", @skip_walk_dirs_abs) . "\n";
+ print 'parse_any: ' . join(", ", @skip_parse_files_any) . "\n";
+ print 'parse_abs: ' . join(", ", @skip_parse_files_abs) . "\n";
+}
+
+sub dprint
+{
+ printf(STDERR @_) if $debug;
+}
+
+sub push_in_abs_any
+{
+ my ($in, $abs, $any) = @_;
+
+ foreach my $path (@$in) {
+ if (File::Spec->file_name_is_absolute($path)) {
+ push @$abs, $path;
+ } elsif (index($path,'/') == -1) {
+ push @$any, $path;
+ } else {
+ print 'path error: ' . $path;
+ }
+ }
+}
+
+# Push command line options to global arrays.
+sub push_to_global
+{
+ push_in_abs_any(\@dont_walk, \@skip_walk_dirs_abs, \@skip_walk_dirs_any);
+ push_in_abs_any(\@dont_parse, \@skip_parse_files_abs, \@skip_parse_files_any);
+}
+
+sub is_false_positive
+{
+ my ($match) = @_;
+
+ if ($match =~ '\b(0x)?(f|F){16}\b' or
+ $match =~ '\b(0x)?0{16}\b') {
+ return 1;
+ }
+
+ # vsyscall memory region, we should probably check against a range here.
+ if ($match =~ '\bf{10}600000\b' or
+ $match =~ '\bf{10}601000\b') {
+ return 1;
+ }
+
+ return 0;
+}
+
+# True if argument potentially contains a kernel address.
+sub may_leak_address
+{
+ my ($line) = @_;
+ my $address = '\b(0x)?ffff[[:xdigit:]]{12}\b';
+
+ # Signal masks.
+ if ($line =~ '^SigBlk:' or
+ $line =~ '^SigCgt:') {
+ return 0;
+ }
+
+ if ($line =~ '\bKEY=[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b' or
+ $line =~ '\b[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b') {
+ return 0;
+ }
+
+ while ($line =~ /($address)/g) {
+ if (!is_false_positive($1)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+sub parse_dmesg
+{
+ open my $cmd, '-|', 'dmesg';
+ while (<$cmd>) {
+ if (may_leak_address($_)) {
+ print 'dmesg: ' . $_;
+ }
+ }
+ close $cmd;
+}
+
+# True if we should skip this path.
+sub skip
+{
+ my ($path, $paths_abs, $paths_any) = @_;
+
+ foreach (@$paths_abs) {
+ return 1 if (/^$path$/);
+ }
+
+ my($filename, $dirs, $suffix) = fileparse($path);
+ foreach (@$paths_any) {
+ return 1 if (/^$filename$/);
+ }
+
+ return 0;
+}
+
+sub skip_parse
+{
+ my ($path) = @_;
+ return skip($path, \@skip_parse_files_abs, \@skip_parse_files_any);
+}
+
+sub parse_file
+{
+ my ($file) = @_;
+
+ if (! -R $file) {
+ return;
+ }
+
+ if (skip_parse($file)) {
+ dprint "skipping file: $file\n";
+ return;
+ }
+ dprint "parsing: $file\n";
+
+ open my $fh, "<", $file or return;
+ while ( <$fh> ) {
+ if (may_leak_address($_)) {
+ print $file . ': ' . $_;
+ }
+ }
+ close $fh;
+}
+
+
+# True if we should skip walking this directory.
+sub skip_walk
+{
+ my ($path) = @_;
+ return skip($path, \@skip_walk_dirs_abs, \@skip_walk_dirs_any)
+}
+
+# Recursively walk directory tree.
+sub walk
+{
+ my @dirs = @_;
+ my %seen;
+
+ while (my $pwd = shift @dirs) {
+ next if (skip_walk($pwd));
+ next if (!opendir(DIR, $pwd));
+ my @files = readdir(DIR);
+ closedir(DIR);
+
+ foreach my $file (@files) {
+ next if ($file eq '.' or $file eq '..');
+
+ my $path = "$pwd/$file";
+ next if (-l $path);
+
+ if (-d $path) {
+ push @dirs, $path;
+ } else {
+ parse_file($path);
+ }
+ }
+ }
+}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 98314b400a95..f51cf977c65b 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1963,7 +1963,7 @@ static void read_symbols(char *modname)
}
license = get_modinfo(info.modinfo, info.modinfo_len, "license");
- if (info.modinfo && !license && !is_vmlinux(modname))
+ if (!license && !is_vmlinux(modname))
warn("modpost: missing MODULE_LICENSE() in %s\n"
"see include/linux/module.h for "
"more information\n", modname);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 66fb9ede9447..7ca0032e7ba9 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -128,7 +128,7 @@ static inline int map_signal_num(int sig)
return SIGUNKNOWN;
else if (sig >= SIGRTMIN)
return sig - SIGRTMIN + 128; /* rt sigs mapped to 128 */
- else if (sig <= MAXMAPPED_SIG)
+ else if (sig < MAXMAPPED_SIG)
return sig_map[sig];
return SIGUNKNOWN;
}
@@ -163,7 +163,7 @@ static void audit_signal_cb(struct audit_buffer *ab, void *va)
audit_signal_mask(ab, aad(sa)->denied);
}
}
- if (aad(sa)->signal <= MAXMAPPED_SIG)
+ if (aad(sa)->signal < MAXMAPPED_SIG)
audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
else
audit_log_format(ab, " signal=rtmin+%d",
diff --git a/security/commoncap.c b/security/commoncap.c
index fc46f5b85251..4f8e09340956 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -536,7 +536,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
bool *effective,
- bool *has_cap)
+ bool *has_fcap)
{
struct cred *new = bprm->cred;
unsigned i;
@@ -546,7 +546,7 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
*effective = true;
if (caps->magic_etc & VFS_CAP_REVISION_MASK)
- *has_cap = true;
+ *has_fcap = true;
CAP_FOR_EACH_U32(i) {
__u32 permitted = caps->permitted.cap[i];
@@ -653,7 +653,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_fcap)
{
int rc = 0;
struct cpu_vfs_cap_data vcaps;
@@ -684,7 +684,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_c
goto out;
}
- rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_fcap);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
@@ -696,6 +696,115 @@ out:
return rc;
}
+static inline bool root_privileged(void) { return !issecure(SECURE_NOROOT); }
+
+static inline bool __is_real(kuid_t uid, struct cred *cred)
+{ return uid_eq(cred->uid, uid); }
+
+static inline bool __is_eff(kuid_t uid, struct cred *cred)
+{ return uid_eq(cred->euid, uid); }
+
+static inline bool __is_suid(kuid_t uid, struct cred *cred)
+{ return !__is_real(uid, cred) && __is_eff(uid, cred); }
+
+/*
+ * handle_privileged_root - Handle case of privileged root
+ * @bprm: The execution parameters, including the proposed creds
+ * @has_fcap: Are any file capabilities set?
+ * @effective: Do we have effective root privilege?
+ * @root_uid: This namespace's root UID with respect to the initial user namespace
+ *
+ * Handle the case where root is privileged and hasn't been neutered by
+ * SECURE_NOROOT. If file capabilities are set, they won't be combined with
+ * set UID root and nothing is changed. If we are root, cap_permitted is
+ * updated. If we have become set UID root, the effective bit is set.
+ */
+static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap,
+ bool *effective, kuid_t root_uid)
+{
+ const struct cred *old = current_cred();
+ struct cred *new = bprm->cred;
+
+ if (!root_privileged())
+ return;
+ /*
+ * If the legacy file capability is set, then don't set privs
+ * for a setuid root binary run by a non-root user. Do set it
+ * for a root user just to cause least surprise to an admin.
+ */
+ if (has_fcap && __is_suid(root_uid, new)) {
+ warn_setuid_and_fcaps_mixed(bprm->filename);
+ return;
+ }
+ /*
+ * To support inheritance of root-permissions and suid-root
+ * executables under compatibility mode, we override the
+ * capability sets for the file.
+ */
+ if (__is_eff(root_uid, new) || __is_real(root_uid, new)) {
+ /* pP' = (cap_bset & ~0) | (pI & ~0) */
+ new->cap_permitted = cap_combine(old->cap_bset,
+ old->cap_inheritable);
+ }
+ /*
+ * If only the real uid is 0, we do not set the effective bit.
+ */
+ if (__is_eff(root_uid, new))
+ *effective = true;
+}
+
+#define __cap_gained(field, target, source) \
+ !cap_issubset(target->cap_##field, source->cap_##field)
+#define __cap_grew(target, source, cred) \
+ !cap_issubset(cred->cap_##target, cred->cap_##source)
+#define __cap_full(field, cred) \
+ cap_issubset(CAP_FULL_SET, cred->cap_##field)
+
+static inline bool __is_setuid(struct cred *new, const struct cred *old)
+{ return !uid_eq(new->euid, old->uid); }
+
+static inline bool __is_setgid(struct cred *new, const struct cred *old)
+{ return !gid_eq(new->egid, old->gid); }
+
+/*
+ * 1) Audit candidate if current->cap_effective is set
+ *
+ * We do not bother to audit if 3 things are true:
+ * 1) cap_effective has all caps
+ * 2) we became root *OR* were already root
+ * 3) root is supposed to have all caps (SECURE_NOROOT)
+ * Since this is just a normal root execing a process.
+ *
+ * Number 1 above might fail if you don't have a full bset, but I think
+ * that is interesting information to audit.
+ *
+ * A number of other conditions require logging:
+ * 2) something prevented setuid root getting all caps
+ * 3) non-setuid root gets fcaps
+ * 4) non-setuid root gets ambient
+ */
+static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
+ kuid_t root, bool has_fcap)
+{
+ bool ret = false;
+
+ if ((__cap_grew(effective, ambient, new) &&
+ !(__cap_full(effective, new) &&
+ (__is_eff(root, new) || __is_real(root, new)) &&
+ root_privileged())) ||
+ (root_privileged() &&
+ __is_suid(root, new) &&
+ !__cap_full(effective, new)) ||
+ (!__is_setuid(new, old) &&
+ ((has_fcap &&
+ __cap_gained(permitted, new, old)) ||
+ __cap_gained(ambient, new, old))))
+
+ ret = true;
+
+ return ret;
+}
+
/**
* cap_bprm_set_creds - Set up the proposed credentials for execve().
* @bprm: The execution parameters, including the proposed creds
@@ -708,61 +817,33 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective, has_cap = false, is_setid;
+ bool effective = false, has_fcap = false, is_setid;
int ret;
kuid_t root_uid;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
- effective = false;
- ret = get_file_caps(bprm, &effective, &has_cap);
+ ret = get_file_caps(bprm, &effective, &has_fcap);
if (ret < 0)
return ret;
root_uid = make_kuid(new->user_ns, 0);
- if (!issecure(SECURE_NOROOT)) {
- /*
- * If the legacy file capability is set, then don't set privs
- * for a setuid root binary run by a non-root user. Do set it
- * for a root user just to cause least surprise to an admin.
- */
- if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) {
- warn_setuid_and_fcaps_mixed(bprm->filename);
- goto skip;
- }
- /*
- * To support inheritance of root-permissions and suid-root
- * executables under compatibility mode, we override the
- * capability sets for the file.
- *
- * If only the real uid is 0, we do not set the effective bit.
- */
- if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) {
- /* pP' = (cap_bset & ~0) | (pI & ~0) */
- new->cap_permitted = cap_combine(old->cap_bset,
- old->cap_inheritable);
- }
- if (uid_eq(new->euid, root_uid))
- effective = true;
- }
-skip:
+ handle_privileged_root(bprm, has_fcap, &effective, root_uid);
/* if we have fs caps, clear dangerous personality flags */
- if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+ if (__cap_gained(permitted, new, old))
bprm->per_clear |= PER_CLEAR_ON_SETID;
-
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
* credentials unless they have the appropriate permit.
*
* In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
- is_setid = !uid_eq(new->euid, old->uid) || !gid_eq(new->egid, old->gid);
+ is_setid = __is_setuid(new, old) || __is_setgid(new, old);
- if ((is_setid ||
- !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
+ if ((is_setid || __cap_gained(permitted, new, old)) &&
((bprm->unsafe & ~LSM_UNSAFE_PTRACE) ||
!ptracer_capable(current, new->user_ns))) {
/* downgrade; they get no more than they had, and maybe less */
@@ -779,7 +860,7 @@ skip:
new->sgid = new->fsgid = new->egid;
/* File caps or setid cancels ambient. */
- if (has_cap || is_setid)
+ if (has_fcap || is_setid)
cap_clear(new->cap_ambient);
/*
@@ -800,26 +881,10 @@ skip:
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EPERM;
- /*
- * Audit candidate if current->cap_effective is set
- *
- * We do not bother to audit if 3 things are true:
- * 1) cap_effective has all caps
- * 2) we are root
- * 3) root is supposed to have all caps (SECURE_NOROOT)
- * Since this is just a normal root execing a process.
- *
- * Number 1 above might fail if you don't have a full bset, but I think
- * that is interesting information to audit.
- */
- if (!cap_issubset(new->cap_effective, new->cap_ambient)) {
- if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
- !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
- issecure(SECURE_NOROOT)) {
- ret = audit_log_bprm_fcaps(bprm, new, old);
- if (ret < 0)
- return ret;
- }
+ if (nonroot_raised_pE(new, old, root_uid, has_fcap)) {
+ ret = audit_log_bprm_fcaps(bprm, new, old);
+ if (ret < 0)
+ return ret;
}
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
@@ -829,13 +894,11 @@ skip:
/* Check for privilege-elevated exec. */
bprm->cap_elevated = 0;
- if (is_setid) {
+ if (is_setid ||
+ (!__is_real(root_uid, new) &&
+ (effective ||
+ __cap_grew(permitted, ambient, new))))
bprm->cap_elevated = 1;
- } else if (!uid_eq(new->uid, root_uid)) {
- if (effective ||
- !cap_issubset(new->cap_permitted, new->cap_ambient))
- bprm->cap_elevated = 1;
- }
return 0;
}
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 06554c448dce..6f9e4ce568cd 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -112,21 +112,25 @@ int __init integrity_init_keyring(const unsigned int id)
int __init integrity_load_x509(const unsigned int id, const char *path)
{
key_ref_t key;
- char *data;
+ void *data;
+ loff_t size;
int rc;
if (!keyring[id])
return -EINVAL;
- rc = integrity_read_file(path, &data);
- if (rc < 0)
+ rc = kernel_read_file_from_path(path, &data, &size, 0,
+ READING_X509_CERTIFICATE);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
return rc;
+ }
key = key_create_or_update(make_key_ref(keyring[id], 1),
"asymmetric",
NULL,
data,
- rc,
+ size,
((KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ),
KEY_ALLOC_NOT_IN_QUOTA);
@@ -139,6 +143,6 @@ int __init integrity_load_x509(const unsigned int id, const char *path)
key_ref_to_ptr(key)->description, path);
key_ref_put(key);
}
- kfree(data);
+ vfree(data);
return 0;
}
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index f5f12727771a..241aca315b0c 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -23,6 +23,9 @@
#define EVM_INIT_HMAC 0x0001
#define EVM_INIT_X509 0x0002
+#define EVM_SETUP 0x80000000 /* userland has signaled key load */
+
+#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP)
extern int evm_initialized;
extern char *evm_hmac;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 1d32cd20009a..bcd64baf8788 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -80,7 +80,7 @@ static struct shash_desc *init_desc(char type)
if (type == EVM_XATTR_HMAC) {
if (!(evm_initialized & EVM_INIT_HMAC)) {
- pr_err("HMAC key is not set\n");
+ pr_err_once("HMAC key is not set\n");
return ERR_PTR(-ENOKEY);
}
tfm = &hmac_tfm;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 063d38aef64e..9826c02e2db8 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -49,6 +49,9 @@ char *evm_config_xattrnames[] = {
XATTR_NAME_SMACKMMAP,
#endif
#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+ XATTR_NAME_APPARMOR,
+#endif
#ifdef CONFIG_IMA_APPRAISE
XATTR_NAME_IMA,
#endif
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index c8dccd54d501..319cf16d6603 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -40,7 +40,7 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
if (*ppos != 0)
return 0;
- sprintf(temp, "%d", evm_initialized);
+ sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP));
rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
return rc;
@@ -61,24 +61,29 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
static ssize_t evm_write_key(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char temp[80];
- int i;
+ int i, ret;
- if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_INIT_HMAC))
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP))
return -EPERM;
- if (count >= sizeof(temp) || count == 0)
- return -EINVAL;
-
- if (copy_from_user(temp, buf, count) != 0)
- return -EFAULT;
+ ret = kstrtoint_from_user(buf, count, 0, &i);
- temp[count] = '\0';
+ if (ret)
+ return ret;
- if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+ /* Reject invalid values */
+ if (!i || (i & ~EVM_INIT_MASK) != 0)
return -EINVAL;
- evm_init_key();
+ if (i & EVM_INIT_HMAC) {
+ ret = evm_init_key();
+ if (ret != 0)
+ return ret;
+ /* Forbid further writes after the symmetric key is loaded */
+ i |= EVM_SETUP;
+ }
+
+ evm_initialized |= i;
return count;
}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 6fc888ca468e..c84e05866052 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -200,55 +200,6 @@ int integrity_kernel_read(struct file *file, loff_t offset,
}
/*
- * integrity_read_file - read entire file content into the buffer
- *
- * This is function opens a file, allocates the buffer of required
- * size, read entire file content to the buffer and closes the file
- *
- * It is used only by init code.
- *
- */
-int __init integrity_read_file(const char *path, char **data)
-{
- struct file *file;
- loff_t size;
- char *buf;
- int rc = -EINVAL;
-
- if (!path || !*path)
- return -EINVAL;
-
- file = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- pr_err("Unable to open file: %s (%d)", path, rc);
- return rc;
- }
-
- size = i_size_read(file_inode(file));
- if (size <= 0)
- goto out;
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = integrity_kernel_read(file, 0, buf, size);
- if (rc == size) {
- *data = buf;
- } else {
- kfree(buf);
- if (rc >= 0)
- rc = -EIO;
- }
-out:
- fput(file);
- return rc;
-}
-
-/*
* integrity_load_keys - load integrity keys hook
*
 * Hook is called from init/main.c:kernel_init_freeable()
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index c2edba8de35e..c7e8db0ea4c0 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -199,42 +199,59 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
struct inode *inode = file_inode(file);
const char *filename = file->f_path.dentry->d_name.name;
int result = 0;
+ int length;
+ void *tmpbuf;
+ u64 i_version;
struct {
struct ima_digest_data hdr;
char digest[IMA_MAX_DIGEST_SIZE];
} hash;
- if (!(iint->flags & IMA_COLLECTED)) {
- u64 i_version = file_inode(file)->i_version;
+ if (iint->flags & IMA_COLLECTED)
+ goto out;
- if (file->f_flags & O_DIRECT) {
- audit_cause = "failed(directio)";
- result = -EACCES;
- goto out;
- }
+ /*
+ * Detecting file change is based on i_version. On filesystems
+ * which do not support i_version, support is limited to an initial
+ * measurement/appraisal/audit.
+ */
+ i_version = file_inode(file)->i_version;
+ hash.hdr.algo = algo;
- hash.hdr.algo = algo;
-
- result = (!buf) ? ima_calc_file_hash(file, &hash.hdr) :
- ima_calc_buffer_hash(buf, size, &hash.hdr);
- if (!result) {
- int length = sizeof(hash.hdr) + hash.hdr.length;
- void *tmpbuf = krealloc(iint->ima_hash, length,
- GFP_NOFS);
- if (tmpbuf) {
- iint->ima_hash = tmpbuf;
- memcpy(iint->ima_hash, &hash, length);
- iint->version = i_version;
- iint->flags |= IMA_COLLECTED;
- } else
- result = -ENOMEM;
- }
+ /* Initialize hash digest to 0's in case of failure */
+ memset(&hash.digest, 0, sizeof(hash.digest));
+
+ if (buf)
+ result = ima_calc_buffer_hash(buf, size, &hash.hdr);
+ else
+ result = ima_calc_file_hash(file, &hash.hdr);
+
+ if (result && result != -EBADF && result != -EINVAL)
+ goto out;
+
+ length = sizeof(hash.hdr) + hash.hdr.length;
+ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
+ if (!tmpbuf) {
+ result = -ENOMEM;
+ goto out;
}
+
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+
+ /* Possibly temporary failure due to type of read (e.g. O_DIRECT) */
+ if (!result)
+ iint->flags |= IMA_COLLECTED;
out:
- if (result)
+ if (result) {
+ if (file->f_flags & O_DIRECT)
+ audit_cause = "failed(directio)";
+
integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
filename, "collect_data", audit_cause,
result, 0);
+ }
return result;
}
@@ -278,7 +295,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
}
result = ima_store_template(entry, violation, inode, filename, pcr);
- if (!result || result == -EEXIST) {
+ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
iint->flags |= IMA_MEASURED;
iint->measured_pcrs |= (0x1 << pcr);
}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 809ba70fbbbf..ec7dfa02c051 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -40,7 +40,7 @@ __setup("ima_appraise=", default_appraise_setup);
*/
bool is_ima_appraise_enabled(void)
{
- return (ima_appraise & IMA_APPRAISE_ENFORCE) ? 1 : 0;
+ return ima_appraise & IMA_APPRAISE_ENFORCE;
}
/*
@@ -405,7 +405,7 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
return -EINVAL;
ima_reset_appraise_flags(d_backing_inode(dentry),
- (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
+ xvalue->type == EVM_IMA_XATTR_DIGSIG);
result = 0;
}
return result;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 802d5d20f36f..a856d8c9c9f3 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -441,6 +441,16 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
loff_t i_size;
int rc;
+ /*
+ * For consistency, fail file's opened with the O_DIRECT flag on
+ * filesystems mounted with/without DAX option.
+ */
+ if (file->f_flags & O_DIRECT) {
+ hash->length = hash_digest_size[ima_hash_algo];
+ hash->algo = ima_hash_algo;
+ return -EINVAL;
+ }
+
i_size = i_size_read(file_inode(file));
if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ad491c51e833..fa540c0469da 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -32,7 +32,7 @@ bool ima_canonical_fmt;
static int __init default_canonical_fmt_setup(char *str)
{
#ifdef __BIG_ENDIAN
- ima_canonical_fmt = 1;
+ ima_canonical_fmt = true;
#endif
return 1;
}
@@ -429,10 +429,10 @@ static int ima_release_policy(struct inode *inode, struct file *file)
}
ima_update_policy();
-#ifndef CONFIG_IMA_WRITE_POLICY
+#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY)
securityfs_remove(ima_policy);
ima_policy = NULL;
-#else
+#elif defined(CONFIG_IMA_WRITE_POLICY)
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
#endif
return 0;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2aebb7984437..770654694efc 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -51,6 +51,8 @@ static int __init hash_setup(char *str)
ima_hash_algo = HASH_ALGO_SHA1;
else if (strncmp(str, "md5", 3) == 0)
ima_hash_algo = HASH_ALGO_MD5;
+ else
+ return 1;
goto out;
}
@@ -60,6 +62,8 @@ static int __init hash_setup(char *str)
break;
}
}
+ if (i == HASH_ALGO__LAST)
+ return 1;
out:
hash_setup_done = 1;
return 1;
@@ -235,11 +239,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
- if (rc != 0) {
- if (file->f_flags & O_DIRECT)
- rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+ if (rc != 0 && rc != -EBADF && rc != -EINVAL)
goto out_digsig;
- }
if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
pathname = ima_d_path(&file->f_path, &pathbuf, filename);
@@ -247,12 +248,14 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
xattr_value, xattr_len, pcr);
- if (action & IMA_APPRAISE_SUBMASK)
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK))
rc = ima_appraise_measurement(func, iint, file, pathname,
xattr_value, xattr_len, opened);
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
+ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
+ rc = 0;
out_digsig:
if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
!(iint->flags & IMA_NEW_FILE))
@@ -359,12 +362,12 @@ void ima_post_path_mknod(struct dentry *dentry)
*/
int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
{
+ bool sig_enforce = is_module_sig_enforced();
+
if (!file && read_id == READING_MODULE) {
-#ifndef CONFIG_MODULE_SIG_FORCE
- if ((ima_appraise & IMA_APPRAISE_MODULES) &&
+ if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES) &&
(ima_appraise & IMA_APPRAISE_ENFORCE))
return -EACCES; /* INTEGRITY_UNKNOWN */
-#endif
return 0; /* We rely on module signature checking */
}
return 0;
@@ -406,6 +409,10 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
if (!file && read_id == READING_MODULE) /* MODULE_SIG_FORCE enabled */
return 0;
+ /* permit signed certs */
+ if (!file && read_id == READING_X509_CERTIFICATE)
+ return 0;
+
if (!file || !buf || size == 0) { /* should never happen */
if (ima_appraise & IMA_APPRAISE_ENFORCE)
return -EACCES;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 95209a5f8595..ee4613fa5840 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -196,9 +196,9 @@ static int __init policy_setup(char *str)
if ((strcmp(p, "tcb") == 0) && !ima_policy)
ima_policy = DEFAULT_TCB;
else if (strcmp(p, "appraise_tcb") == 0)
- ima_use_appraise_tcb = 1;
+ ima_use_appraise_tcb = true;
else if (strcmp(p, "secure_boot") == 0)
- ima_use_secure_boot = 1;
+ ima_use_secure_boot = true;
}
return 1;
@@ -207,7 +207,7 @@ __setup("ima_policy=", policy_setup);
static int __init default_appraise_policy_setup(char *str)
{
- ima_use_appraise_tcb = 1;
+ ima_use_appraise_tcb = true;
return 1;
}
__setup("ima_appraise_tcb", default_appraise_policy_setup);
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index a53e7e4ab06c..e1bf040fb110 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -120,8 +120,6 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
int integrity_kernel_read(struct file *file, loff_t offset,
void *addr, unsigned long count);
-int __init integrity_read_file(const char *path, char **data);
-
#define INTEGRITY_KEYRING_EVM 0
#define INTEGRITY_KEYRING_IMA 1
#define INTEGRITY_KEYRING_MODULE 2
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 286171a16ed2..14cc7940b36d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4600,6 +4600,82 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
return 0;
}
+static int smack_inode_copy_up(struct dentry *dentry, struct cred **new)
+{
+
+ struct task_smack *tsp;
+ struct smack_known *skp;
+ struct inode_smack *isp;
+ struct cred *new_creds = *new;
+
+ if (new_creds == NULL) {
+ new_creds = prepare_creds();
+ if (new_creds == NULL)
+ return -ENOMEM;
+ }
+
+ tsp = new_creds->security;
+
+ /*
+ * Get label from overlay inode and set it in create_sid
+ */
+ isp = d_inode(dentry->d_parent)->i_security;
+ skp = isp->smk_inode;
+ tsp->smk_task = skp;
+ *new = new_creds;
+ return 0;
+}
+
+static int smack_inode_copy_up_xattr(const char *name)
+{
+ /*
+ * Return 1 if this is the Smack access attribute.
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0)
+ return 1;
+
+ return -EOPNOTSUPP;
+}
+
+static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ struct task_smack *otsp = old->security;
+ struct task_smack *ntsp = new->security;
+ struct inode_smack *isp;
+ int may;
+
+ /*
+ * Use the process credential unless all of
+ * the transmuting criteria are met
+ */
+ ntsp->smk_task = otsp->smk_task;
+
+ /*
+ * the attribute of the containing directory
+ */
+ isp = d_inode(dentry->d_parent)->i_security;
+
+ if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
+ rcu_read_lock();
+ may = smk_access_entry(otsp->smk_task->smk_known,
+ isp->smk_inode->smk_known,
+ &otsp->smk_task->smk_rules);
+ rcu_read_unlock();
+
+ /*
+ * If the directory is transmuting and the rule
+ * providing access is transmuting use the containing
+ * directory label instead of the process label.
+ */
+ if (may > 0 && (may & MAY_TRANSMUTE))
+ ntsp->smk_task = isp->smk_inode;
+ }
+ return 0;
+}
+
static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
@@ -4735,6 +4811,9 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(inode_notifysecctx, smack_inode_notifysecctx),
LSM_HOOK_INIT(inode_setsecctx, smack_inode_setsecctx),
LSM_HOOK_INIT(inode_getsecctx, smack_inode_getsecctx),
+ LSM_HOOK_INIT(inode_copy_up, smack_inode_copy_up),
+ LSM_HOOK_INIT(inode_copy_up_xattr, smack_inode_copy_up_xattr),
+ LSM_HOOK_INIT(dentry_create_files_as, smack_dentry_create_files_as),
};
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index d330b060dcff..0f73fe30e37a 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -157,7 +157,7 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
if (!buffer)
return NULL;
- tomoyo_convert_time(get_seconds(), &stamp);
+ tomoyo_convert_time(ktime_get_real_seconds(), &stamp);
pos = snprintf(buffer, tomoyo_buffer_len - 1,
"#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 21691b99e61f..25eed4b0b0e8 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2257,7 +2257,7 @@ static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
/* Timestamp counter for last updated. */
static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
/* Counter for number of updates. */
-static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
+static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
/**
* tomoyo_update_stat - Update statistic counters.
@@ -2272,7 +2272,7 @@ void tomoyo_update_stat(const u8 index)
* I don't use atomic operations because race condition is not fatal.
*/
tomoyo_stat_updated[index]++;
- tomoyo_stat_modified[index] = get_seconds();
+ tomoyo_stat_modified[index] = ktime_get_real_seconds();
}
/**
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index e4097d7994b1..7adccdd8e36d 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -1037,7 +1037,7 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
bool (*check_entry) (struct tomoyo_request_info *,
const struct tomoyo_acl_info *));
void tomoyo_check_profile(void);
-void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp);
+void tomoyo_convert_time(time64_t time, struct tomoyo_time *stamp);
void tomoyo_del_condition(struct list_head *element);
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 580b318910f1..d3d9d9f1edb0 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -87,38 +87,17 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
* @stamp: Pointer to "struct tomoyo_time".
*
* Returns nothing.
- *
- * This function does not handle Y2038 problem.
*/
-void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp)
+void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
{
- static const u16 tomoyo_eom[2][12] = {
- { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
- { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
- };
- u16 y;
- u8 m;
- bool r;
- stamp->sec = time % 60;
- time /= 60;
- stamp->min = time % 60;
- time /= 60;
- stamp->hour = time % 24;
- time /= 24;
- for (y = 1970; ; y++) {
- const unsigned short days = (y & 3) ? 365 : 366;
- if (time < days)
- break;
- time -= days;
- }
- r = (y & 3) == 0;
- for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++)
- ;
- if (m)
- time -= tomoyo_eom[r][m - 1];
- stamp->year = y;
- stamp->month = ++m;
- stamp->day = ++time;
+ struct tm tm;
+ time64_to_tm(time64, 0, &tm);
+ stamp->sec = tm.tm_sec;
+ stamp->min = tm.tm_min;
+ stamp->hour = tm.tm_hour;
+ stamp->day = tm.tm_mday;
+ stamp->month = tm.tm_mon + 1;
+ stamp->year = tm.tm_year + 1900;
}
/**
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 1ac0c423903e..6e47b823bcaa 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void)
timer->hw = hrtimer_hw;
timer->hw.resolution = resolution;
timer->hw.ticks = NANO_SEC / resolution;
+ timer->max_instances = 100; /* lower the limit */
err = snd_timer_global_register(timer);
if (err < 0) {
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index aaff9ee32695..b30b2139e3f0 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
if (!dp->timer->running)
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
- ev->data.ext.ptr, ev->data.ext.len);
+ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 046cb586fb2f..06b21226b4e7 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
}
/*
+ * put MIDI sysex bytes; the event buffer may be chained, thus it has
+ * to be expanded via snd_seq_dump_var_event().
+ */
+struct readq_sysex_ctx {
+ struct seq_oss_readq *readq;
+ int dev;
+};
+
+static int readq_dump_sysex(void *ptr, void *buf, int count)
+{
+ struct readq_sysex_ctx *ctx = ptr;
+
+ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
+}
+
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev)
+{
+ struct readq_sysex_ctx ctx = {
+ .readq = q,
+ .dev = dev
+ };
+
+ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ return 0;
+ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
+}
+
+/*
* copy an event to input queue:
* return zero if enqueued
*/
diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
index f1463f1f449e..8d033ca2d23f 100644
--- a/sound/core/seq/oss/seq_oss_readq.h
+++ b/sound/core/seq/oss/seq_oss_readq.h
@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev);
int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 6cdd04a45962..15e82a656d96 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -180,7 +180,7 @@ static void snd_timer_request(struct snd_timer_id *tid)
*
* call this with register_mutex down.
*/
-static void snd_timer_check_slave(struct snd_timer_instance *slave)
+static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
@@ -190,16 +190,21 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
+ if (master->timer->num_instances >=
+ master->timer->max_instances)
+ return -EBUSY;
list_move_tail(&slave->open_list,
&master->slave_list_head);
+ master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
- return;
+ return 0;
}
}
}
+ return 0;
}
/*
@@ -208,7 +213,7 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
*
* call this with register_mutex down.
*/
-static void snd_timer_check_master(struct snd_timer_instance *master)
+static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
@@ -216,7 +221,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
+ if (master->timer->num_instances >=
+ master->timer->max_instances)
+ return -EBUSY;
list_move_tail(&slave->open_list, &master->slave_list_head);
+ master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
@@ -228,8 +237,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
spin_unlock_irq(&slave_active_lock);
}
}
+ return 0;
}
+static int snd_timer_close_locked(struct snd_timer_instance *timeri);
+
/*
* open a timer instance
* when opening a master, the slave id must be here given.
@@ -240,6 +252,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
+ int err;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
@@ -259,10 +272,14 @@ int snd_timer_open(struct snd_timer_instance **ti,
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
- snd_timer_check_slave(timeri);
+ err = snd_timer_check_slave(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri);
+ timeri = NULL;
+ }
mutex_unlock(&register_mutex);
*ti = timeri;
- return 0;
+ return err;
}
/* open a master instance */
@@ -288,6 +305,10 @@ int snd_timer_open(struct snd_timer_instance **ti,
return -EBUSY;
}
}
+ if (timer->num_instances >= timer->max_instances) {
+ mutex_unlock(&register_mutex);
+ return -EBUSY;
+ }
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
@@ -314,25 +335,27 @@ int snd_timer_open(struct snd_timer_instance **ti,
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
- snd_timer_check_master(timeri);
+ timer->num_instances++;
+ err = snd_timer_check_master(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri);
+ timeri = NULL;
+ }
mutex_unlock(&register_mutex);
*ti = timeri;
- return 0;
+ return err;
}
EXPORT_SYMBOL(snd_timer_open);
/*
* close a timer instance
+ * call this with register_mutex down.
*/
-int snd_timer_close(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
- if (snd_BUG_ON(!timeri))
- return -ENXIO;
-
- mutex_lock(&register_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
@@ -340,6 +363,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
timer = timeri->timer;
if (timer) {
+ timer->num_instances--;
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
@@ -355,6 +379,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
+ timer->num_instances--;
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
@@ -382,9 +407,24 @@ int snd_timer_close(struct snd_timer_instance *timeri)
module_put(timer->module);
}
- mutex_unlock(&register_mutex);
return 0;
}
+
+/*
+ * close a timer instance
+ */
+int snd_timer_close(struct snd_timer_instance *timeri)
+{
+ int err;
+
+ if (snd_BUG_ON(!timeri))
+ return -ENXIO;
+
+ mutex_lock(&register_mutex);
+ err = snd_timer_close_locked(timeri);
+ mutex_unlock(&register_mutex);
+ return err;
+}
EXPORT_SYMBOL(snd_timer_close);
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
@@ -856,6 +896,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
+ timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 546d515f3c1f..dce0682c5001 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6544,6 +6544,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x1b, 0x90a70130},
{0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0xb7a60130},
+ {0x13, 0xb8a61140},
+ {0x16, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x14, 0x90170110},
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 4f5f18f22974..20624320b753 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1375,6 +1375,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x199:
return SNDRV_PCM_FMTBIT_DSD_U32_LE;
case 0x19b:
+ case 0x203:
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
default:
break;
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 6598fb76d2c2..9816590d3ad2 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -829,6 +829,7 @@ struct drm_i915_gem_exec_fence {
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
__u32 flags;
};
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 771ddab94bb0..d5d7fff1c211 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1138,6 +1138,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
return err;
}
+static void trace__symbols__exit(struct trace *trace)
+{
+ machine__exit(trace->host);
+ trace->host = NULL;
+
+ symbol__exit();
+}
+
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
int idx;
@@ -2481,6 +2489,8 @@ out_disable:
}
out_delete_evlist:
+ trace__symbols__exit(trace);
+
perf_evlist__delete(evlist);
trace->evlist = NULL;
trace->live = false;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 6680e4fb7967..025729510525 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -5,6 +5,7 @@
%option stack
%option bison-locations
%option yylineno
+%option reject
%{
#include <errno.h>
@@ -339,8 +340,8 @@ r{num_raw_hex} { return raw(yyscanner); }
{num_hex} { return value(yyscanner, 16); }
{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
-{bpf_object} { if (!isbpf(yyscanner)) USER_REJECT; return str(yyscanner, PE_BPF_OBJECT); }
-{bpf_source} { if (!isbpf(yyscanner)) USER_REJECT; return str(yyscanner, PE_BPF_SOURCE); }
+{bpf_object} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); }
+{bpf_source} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); }
{name} { return pmu_str_check(yyscanner); }
"/" { BEGIN(config); return '/'; }
- { return '-'; }