From 45e898b735620f426eddf105fc886d2966593a58 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 9 Nov 2015 19:09:25 -0500 Subject: locking/pvqspinlock: Collect slowpath lock statistics This patch enables the accumulation of kicking- and waiting-related PV qspinlock statistics when the new QUEUED_LOCK_STAT configuration option is selected. It also enables the collection of data that enables us to calculate the kicking and wakeup latencies, which have a heavy dependency on the CPUs being used. The statistical counters are per-cpu variables to minimize the performance overhead of their updates. These counters are exported via the debugfs filesystem under the qlockstat directory. When the corresponding debugfs files are read, summation and computation of the required data are performed. The measured latencies for different CPUs are: CPU Wakeup Kicking --- ------ ------- Haswell-EX 63.6us 7.4us Westmere-EX 67.6us 9.3us The measured latencies varied a bit from run to run. The wakeup latency is much higher than the kicking latency. A sample of statistical counters after system bootup (with vCPU overcommit) was: pv_hash_hops=1.00 pv_kick_unlock=1148 pv_kick_wake=1146 pv_latency_kick=11040 pv_latency_wake=194840 pv_spurious_wakeup=7 pv_wait_again=4 pv_wait_head=23 pv_wait_node=1129 Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Davidlohr Bueso Cc: Douglas Hatch Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Scott J Norton Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1447114167-47185-6-git-send-email-Waiman.Long@hpe.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/x86/Kconfig') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index db3622f22b61..965fc4216f76 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -687,6 +687,14 @@ config PARAVIRT_SPINLOCKS If you are unsure how to answer this question, answer Y. +config QUEUED_LOCK_STAT + bool "Paravirt queued spinlock statistics" + depends on PARAVIRT_SPINLOCKS && DEBUG_FS && QUEUED_SPINLOCKS + ---help--- + Enable the collection of statistical data on the slowpath + behavior of paravirtualized queued spinlocks and report + them on debugfs. + source "arch/x86/xen/Kconfig" config KVM_GUEST -- cgit v1.2.3 From 6e1315fe82308cd29e7550eab967262e8bbc71a3 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 7 Dec 2015 10:39:42 +0100 Subject: x86/cpu: Provide a config option to disable static_cpu_has This brings .text savings of about 1.6K when building a tinyconfig. It is off by default, so default builds are unaffected. Kconfig help text from Josh. Signed-off-by: Borislav Petkov Reviewed-by: Josh Triplett Link: http://lkml.kernel.org/r/1449481182-27541-5-git-send-email-bp@alien8.de Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig | 11 +++++++++++ arch/x86/include/asm/cpufeature.h | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/x86/Kconfig') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index db3622f22b61..a2abc2fb6970 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -349,6 +349,17 @@ config X86_FEATURE_NAMES If in doubt, say Y. +config X86_FAST_FEATURE_TESTS + bool "Fast CPU feature tests" if EMBEDDED + default y + ---help--- + Some fast-paths in the kernel depend on the capabilities of the CPU. + Say Y here for the kernel to patch in the appropriate code at runtime + based on the capabilities of the CPU.
The infrastructure for patching + code at runtime takes up some additional space; space-constrained + embedded systems may wish to say N here to produce smaller, slightly + slower code. + config X86_X2APIC bool "Support x2apic" depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 144b042c0872..43e144474043 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -409,7 +409,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; * fast paths and boot_cpu_has() otherwise! */ -#if __GNUC__ >= 4 +#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS) extern void warn_pre_alternatives(void); extern bool __static_cpu_has_safe(u16 bit); -- cgit v1.2.3 From eebb3e8d8aaf28f4bcaf12fd3645350bfd2f0b36 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 12 Dec 2015 02:45:06 +0100 Subject: ACPI / LPSS: override power state for LPSS DMA device This is a third approach to work around a long-standing issue with LPSS on BayTrail. The first one [1] was reverted since it didn't resolve the issue comprehensively. The second one [2] was rejected by internal review. The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover, it can be powered off automatically whenever the last LPSS device goes down. With no power, any access to the DMA controller will hang the system. The behaviour has been reproduced on some HP laptops based on Intel BayTrail [3,4] as well as on the ASUS T100TA transformer. Work around this by powering on the LPSS island through registers reachable via the IOSF sideband interface. [1] http://www.spinics.net/lists/linux-acpi/msg53963.html [2] https://bugzilla.redhat.com/attachment.cgi?id=1066779&action=diff [3] https://bugzilla.redhat.com/show_bug.cgi?id=1184273 [4] http://www.spinics.net/lists/dmaengine/msg01514.html Signed-off-by: Andy Shevchenko Signed-off-by: Rafael J. Wysocki --- arch/x86/Kconfig | 3 +- arch/x86/include/asm/iosf_mbi.h | 2 + drivers/acpi/acpi_lpss.c | 153 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 150 insertions(+), 8 deletions(-) (limited to 'arch/x86/Kconfig') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index db3622f22b61..790aa3ee1afa 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -523,9 +523,10 @@ config X86_INTEL_QUARK config X86_INTEL_LPSS bool "Intel Low Power Subsystem Support" - depends on ACPI + depends on X86 && ACPI select COMMON_CLK select PINCTRL + select IOSF_MBI ---help--- Select to build support for Intel Low Power Subsystem such as found on Intel Lynxpoint PCH.
Selecting this option enables diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h index cdc5f6352ac5..b41ee164930a 100644 --- a/arch/x86/include/asm/iosf_mbi.h +++ b/arch/x86/include/asm/iosf_mbi.h @@ -19,6 +19,8 @@ /* IOSF SB read/write opcodes */ #define MBI_MMIO_READ 0x00 #define MBI_MMIO_WRITE 0x01 +#define MBI_CFG_READ 0x04 +#define MBI_CFG_WRITE 0x05 #define MBI_CR_READ 0x06 #define MBI_CR_WRITE 0x07 #define MBI_REG_READ 0x10 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index a10c2d665ec2..84d3d90557d1 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -26,6 +27,10 @@ ACPI_MODULE_NAME("acpi_lpss"); #ifdef CONFIG_X86_INTEL_LPSS +#include +#include +#include + #define LPSS_ADDR(desc) ((unsigned long)&desc) #define LPSS_CLK_SIZE 0x04 @@ -71,7 +76,7 @@ struct lpss_device_desc { void (*setup)(struct lpss_private_data *pdata); }; -static struct lpss_device_desc lpss_dma_desc = { +static const struct lpss_device_desc lpss_dma_desc = { .flags = LPSS_CLK, }; @@ -84,6 +89,23 @@ struct lpss_private_data { u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; }; +/* LPSS run time quirks */ +static unsigned int lpss_quirks; + +/* + * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device. + * + * The LPSS DMA controller does not have neither _PS0 nor _PS3 method. Moreover + * it can be powered off automatically whenever the last LPSS device goes down. + * In case of no power any access to the DMA controller will hang the system. + * The behaviour is reproduced on some HP laptops based on Intel BayTrail as + * well as on ASuS T100TA transformer. + * + * This quirk overrides power state of entire LPSS island to keep DMA powered + * on whenever we have at least one other device in use. + */ +#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0) + /* UART Component Parameter Register */ #define LPSS_UART_CPR 0xF4 #define LPSS_UART_CPR_AFCE BIT(4) @@ -196,13 +218,21 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = { .setup = byt_i2c_setup, }; -static struct lpss_device_desc bsw_spi_dev_desc = { +static const struct lpss_device_desc bsw_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, .prv_offset = 0x400, .setup = lpss_deassert_reset, }; +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id lpss_cpu_ids[] = { + ICPU(0x37), /* Valleyview, Bay Trail */ + ICPU(0x4c), /* Braswell, Cherry Trail */ + {} +}; + #else #define LPSS_ADDR(desc) (0UL) @@ -661,6 +691,89 @@ static int acpi_lpss_resume_early(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ +/* IOSF SB for LPSS island */ +#define LPSS_IOSF_UNIT_LPIOEP 0xA0 +#define LPSS_IOSF_UNIT_LPIO1 0xAB +#define LPSS_IOSF_UNIT_LPIO2 0xAC + +#define LPSS_IOSF_PMCSR 0x84 +#define LPSS_PMCSR_D0 0 +#define LPSS_PMCSR_D3hot 3 +#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0) + +#define LPSS_IOSF_GPIODEF0 0x154 +#define LPSS_GPIODEF0_DMA1_D3 BIT(2) +#define LPSS_GPIODEF0_DMA2_D3 BIT(3) +#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2) + +static DEFINE_MUTEX(lpss_iosf_mutex); + +static void lpss_iosf_enter_d3_state(void) +{ + u32 value1 = 0; + u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK; + u32 value2 = LPSS_PMCSR_D3hot; + u32 mask2 = LPSS_PMCSR_Dx_MASK; + /* + * PMC provides an information about actual status of the LPSS devices. + * Here we read the values related to LPSS power island, i.e. 
LPSS + * devices, excluding both LPSS DMA controllers, along with SCC domain. + */ + u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe; + int ret; + + ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis); + if (ret) + return; + + mutex_lock(&lpss_iosf_mutex); + + ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0); + if (ret) + goto exit; + + /* + * Get the status of entire LPSS power island per device basis. + * Shutdown both LPSS DMA controllers if and only if all other devices + * are already in D3hot. + */ + pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask; + if (pmc_status) + goto exit; + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, + LPSS_IOSF_GPIODEF0, value1, mask1); +exit: + mutex_unlock(&lpss_iosf_mutex); +} + +static void lpss_iosf_exit_d3_state(void) +{ + u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3; + u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK; + u32 value2 = LPSS_PMCSR_D0; + u32 mask2 = LPSS_PMCSR_Dx_MASK; + + mutex_lock(&lpss_iosf_mutex); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, + LPSS_IOSF_GPIODEF0, value1, mask1); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + mutex_unlock(&lpss_iosf_mutex); +} + static int acpi_lpss_runtime_suspend(struct device *dev) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); @@ -673,7 +786,17 @@ static int acpi_lpss_runtime_suspend(struct device *dev) if (pdata->dev_desc->flags & LPSS_SAVE_CTX) acpi_lpss_save_ctx(dev, pdata); - return acpi_dev_runtime_suspend(dev); + ret = acpi_dev_runtime_suspend(dev); + + /* + * This call must be last in the sequence, otherwise PMC will return + * wrong status for devices being about to be powered off. See + * lpss_iosf_enter_d3_state() for further information. + */ + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + lpss_iosf_enter_d3_state(); + + return ret; } static int acpi_lpss_runtime_resume(struct device *dev) @@ -681,6 +804,13 @@ static int acpi_lpss_runtime_resume(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; + /* + * This call is kept first to be in symmetry with + * acpi_lpss_runtime_suspend() one. + */ + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + lpss_iosf_exit_d3_state(); + ret = acpi_dev_runtime_resume(dev); if (ret) return ret; @@ -798,10 +928,19 @@ static struct acpi_scan_handler lpss_handler = { void __init acpi_lpss_init(void) { - if (!lpt_clk_init()) { - bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); - acpi_scan_add_handler(&lpss_handler); - } + const struct x86_cpu_id *id; + int ret; + + ret = lpt_clk_init(); + if (ret) + return; + + id = x86_match_cpu(lpss_cpu_ids); + if (id) + lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON; + + bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); + acpi_scan_add_handler(&lpss_handler); } #else -- cgit v1.2.3 From 21266be9ed542f13436bd9c75316d43e1e84f6ae Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 19 Nov 2015 18:19:29 -0800 Subject: arch: consolidate CONFIG_STRICT_DEVM in lib/Kconfig.debug Let all the archs that implement devmem_is_allowed() opt-in to a common definition of CONFIG_STRICT_DEVM in lib/Kconfig.debug. 
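For reference, the filtering behind STRICT_DEVMEM lives in a per-architecture devmem_is_allowed() hook; the hunks below only move the Kconfig plumbing, so the hook itself never appears in this patch. A rough sketch, simplified from x86's implementation of this era (illustrative, not part of this change):

    /*
     * Per-arch /dev/mem filter consulted by drivers/char/mem.c when
     * STRICT_DEVMEM=y. Returns non-zero if userspace may map the page
     * at the given page frame number.
     */
    int devmem_is_allowed(unsigned long pagenr)
    {
            if (pagenr < 256)               /* low 1MB: BIOS code and data */
                    return 1;
            if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                    return 0;
            if (!page_is_ram(pagenr))       /* MMIO/PCI space stays mappable */
                    return 1;
            return 0;                       /* ordinary RAM is filtered out */
    }

Each architecture advertises that it provides such a hook by selecting ARCH_HAS_DEVMEM_IS_ALLOWED, which is exactly what the per-arch hunks below add.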
Cc: Kees Cook Cc: Russell King Cc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andrew Morton Cc: Greg Kroah-Hartman Cc: "David S. Miller" Acked-by: Catalin Marinas Acked-by: Heiko Carstens [heiko: drop 'default y' for s390] Acked-by: Ingo Molnar Suggested-by: Arnd Bergmann Signed-off-by: Dan Williams --- arch/arm/Kconfig | 1 + arch/arm/Kconfig.debug | 14 -------------- arch/arm64/Kconfig | 1 + arch/arm64/Kconfig.debug | 14 -------------- arch/frv/Kconfig | 1 + arch/m32r/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/powerpc/Kconfig.debug | 12 ------------ arch/s390/Kconfig | 1 + arch/s390/Kconfig.debug | 12 ------------ arch/tile/Kconfig | 4 +--- arch/unicore32/Kconfig | 1 + arch/unicore32/Kconfig.debug | 14 -------------- arch/x86/Kconfig | 1 + arch/x86/Kconfig.debug | 17 ----------------- lib/Kconfig.debug | 22 ++++++++++++++++++++++ 16 files changed, 31 insertions(+), 86 deletions(-) (limited to 'arch/x86/Kconfig') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 34e1569a11ee..b8a47974c2d7 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2,6 +2,7 @@ config ARM bool default y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 259c0ca9c99a..e356357d86bb 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -15,20 +15,6 @@ config ARM_PTDUMP kernel. If in doubt, say "N" -config STRICT_DEVMEM - bool "Filter access to /dev/mem" - depends on MMU - ---help--- - If this option is disabled, you allow userspace (root) access to all - of memory, including kernel and userspace memory. Accidental - access to this is obviously disastrous, but specific access can - be used by people debugging the kernel. - - If this option is switched on, the /dev/mem file only allows - userspace access to memory mapped peripherals. - - If in doubt, say Y. - # RMK wants arm kernels compiled with frame pointers or stack unwinding. # If you know what you are doing and are willing to live without stack # traces, you can get a slightly smaller kernel by setting this option to diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 871f21783866..08f64b455aa8 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -3,6 +3,7 @@ config ARM64 select ACPI_CCA_REQUIRED if ACPI select ACPI_GENERIC_GSI if ACPI select ACPI_REDUCED_HARDWARE_ONLY if ACPI + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 04fb73b973f1..e13c4bf84d9e 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -14,20 +14,6 @@ config ARM64_PTDUMP kernel. If in doubt, say "N" -config STRICT_DEVMEM - bool "Filter access to /dev/mem" - depends on MMU - help - If this option is disabled, you allow userspace (root) access to all - of memory, including kernel and userspace memory. Accidental - access to this is obviously disastrous, but specific access can - be used by people debugging the kernel. - - If this option is switched on, the /dev/mem file only allows - userspace access to memory mapped peripherals. - - If in doubt, say Y. 
- config PID_IN_CONTEXTIDR bool "Write the current PID to the CONTEXTIDR register" help diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 34aa19352dc1..03bfd6bf03e7 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -10,6 +10,7 @@ config FRV select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CPU_DEVICES + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_WANT_IPC_PARSE_VERSION select OLD_SIGSUSPEND3 select OLD_SIGACTION diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 9e44bbd8051e..836ac5a963c8 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -13,6 +13,7 @@ config M32R select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_USES_GETTIMEOFFSET select MODULES_USE_ELF_RELA select HAVE_DEBUG_STACKOVERFLOW diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db49e0d796b1..85eabc49de61 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -159,6 +159,7 @@ config PPC select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB select ARCH_HAS_DMA_SET_COHERENT_MASK + select ARCH_HAS_DEVMEM_IS_ALLOWED select HAVE_ARCH_SECCOMP_FILTER config GENERIC_CSUM diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 3a510f4a6b68..a0e44a9c456f 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -335,18 +335,6 @@ config PPC_EARLY_DEBUG_CPM_ADDR platform probing is done, all platforms selected must share the same address. -config STRICT_DEVMEM - def_bool y - prompt "Filter access to /dev/mem" - help - This option restricts access to /dev/mem. If this option is - disabled, you allow userspace access to all memory, including - kernel and userspace memory. Accidental memory access is likely - to be disastrous. - Memory access is required for experts who want to debug the kernel. - - If you are unsure, say Y. - config FAIL_IOMMU bool "Fault-injection capability for IOMMU" depends on FAULT_INJECTION diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 3a55f493c7da..779becb895be 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -66,6 +66,7 @@ config S390 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SG_CHAIN diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index c56878e1245f..26c5d5beb4be 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -5,18 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" -config STRICT_DEVMEM - def_bool y - prompt "Filter access to /dev/mem" - ---help--- - This option restricts access to /dev/mem. If this option is - disabled, you allow userspace access to all memory, including - kernel and userspace memory. Accidental memory access is likely - to be disastrous. - Memory access is required for experts who want to debug the kernel. - - If you are unsure, say Y. 
- config S390_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 106c21bd7f44..cf3116887509 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -19,6 +19,7 @@ config TILE select VIRT_TO_BUS select SYS_HYPERVISOR select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA @@ -116,9 +117,6 @@ config ARCH_DISCONTIGMEM_DEFAULT config TRACE_IRQFLAGS_SUPPORT def_bool y -config STRICT_DEVMEM - def_bool y - # SMP is required for Tilera Linux. config SMP def_bool y diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index c9faddc61100..5dc4c0a43ccd 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -1,5 +1,6 @@ config UNICORE32 def_bool y + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select HAVE_MEMBLOCK diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug index 1a3626239843..f075bbe1d46f 100644 --- a/arch/unicore32/Kconfig.debug +++ b/arch/unicore32/Kconfig.debug @@ -2,20 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config STRICT_DEVMEM - bool "Filter access to /dev/mem" - depends on MMU - ---help--- - If this option is disabled, you allow userspace (root) access to all - of memory, including kernel and userspace memory. Accidental - access to this is obviously disastrous, but specific access can - be used by people debugging the kernel. - - If this option is switched on, the /dev/mem file only allows - userspace access to memory mapped peripherals. - - If in doubt, say Y. - config EARLY_PRINTK def_bool DEBUG_OCD help diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index db3622f22b61..75fba1fc205d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -24,6 +24,7 @@ config X86 select ARCH_DISCARD_MEMBLOCK select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 137dfa96aa14..1116452fcfc2 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -5,23 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" -config STRICT_DEVMEM - bool "Filter access to /dev/mem" - ---help--- - If this option is disabled, you allow userspace (root) access to all - of memory, including kernel and userspace memory. Accidental - access to this is obviously disastrous, but specific access can - be used by people debugging the kernel. Note that with PAT support - enabled, even in this case there are restrictions on /dev/mem - use due to the cache aliasing requirements. - - If this option is switched on, the /dev/mem file only allows - userspace access to PCI space and the BIOS code and data regions. - This is sufficient for dosemu and X and all common users of - /dev/mem. - - If in doubt, say Y. 
- config X86_VERBOSE_BOOTUP bool "Enable verbose x86 bootup info messages" default y diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8c15b29d5adc..289dfcbc14eb 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1853,3 +1853,25 @@ source "samples/Kconfig" source "lib/Kconfig.kgdb" +config ARCH_HAS_DEVMEM_IS_ALLOWED + bool + +config STRICT_DEVMEM + bool "Filter access to /dev/mem" + depends on MMU + depends on ARCH_HAS_DEVMEM_IS_ALLOWED + default y if TILE || PPC + ---help--- + If this option is disabled, you allow userspace (root) access to all + of memory, including kernel and userspace memory. Accidental + access to this is obviously disastrous, but specific access can + be used by people debugging the kernel. Note that with PAT support + enabled, even in this case there are restrictions on /dev/mem + use due to the cache aliasing requirements. + + If this option is switched on, the /dev/mem file only allows + userspace access to PCI space and the BIOS code and data regions. + This is sufficient for dosemu and X and all common users of + /dev/mem. + + If in doubt, say Y. -- cgit v1.2.3 From 9e08f57d684ac2f40685f55f659564bfd91a971e Mon Sep 17 00:00:00 2001 From: Daniel Cashman Date: Thu, 14 Jan 2016 15:20:06 -0800 Subject: x86: mm: support ARCH_MMAP_RND_BITS x86: arch_mmap_rnd() uses hard-coded values, 8 for 32-bit and 28 for 64-bit, to generate the random offset for the mmap base address. This value represents a compromise between increased ASLR effectiveness and avoiding address-space fragmentation. Replace it with a Kconfig option, which is sensibly bounded, so that platform developers may choose where to place this compromise. Keep default values as new minimums. Signed-off-by: Daniel Cashman Cc: Russell King Acked-by: Kees Cook Cc: Ingo Molnar Cc: Jonathan Corbet Cc: Don Zickus Cc: Eric W. Biederman Cc: Heinrich Schuchardt Cc: Josh Poimboeuf Cc: Kirill A. Shutemov Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Mel Gorman Cc: Thomas Gleixner Cc: David Rientjes Cc: Mark Salyzyn Cc: Jeff Vander Stoep Cc: Nick Kralevich Cc: Catalin Marinas Cc: Will Deacon Cc: "H. 
Peter Anvin" Cc: Hector Marco-Gisbert Cc: Borislav Petkov Cc: Ralf Baechle Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 16 ++++++++++++++++ arch/x86/mm/mmap.c | 12 ++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) (limited to 'arch/x86/Kconfig') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5d2293417946..24f362bf3ec6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -83,6 +83,8 @@ config X86 select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP select HAVE_ARCH_KGDB select HAVE_ARCH_KMEMCHECK + select HAVE_ARCH_MMAP_RND_BITS if MMU + select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SOFT_DIRTY if X86_64 select HAVE_ARCH_TRACEHOOK @@ -184,6 +186,20 @@ config HAVE_LATENCYTOP_SUPPORT config MMU def_bool y +config ARCH_MMAP_RND_BITS_MIN + default 28 if 64BIT + default 8 + +config ARCH_MMAP_RND_BITS_MAX + default 32 if 64BIT + default 16 + +config ARCH_MMAP_RND_COMPAT_BITS_MIN + default 8 + +config ARCH_MMAP_RND_COMPAT_BITS_MAX + default 16 + config SBUS bool diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 844b06d67df4..96bd1e2bffaf 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -69,14 +69,14 @@ unsigned long arch_mmap_rnd(void) { unsigned long rnd; - /* - * 8 bits of randomness in 32bit mmaps, 20 address space bits - * 28 bits of randomness in 64bit mmaps, 40 address space bits - */ if (mmap_is_ia32()) - rnd = (unsigned long)get_random_int() % (1<<8); +#ifdef CONFIG_COMPAT + rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); +#else + rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); +#endif else - rnd = (unsigned long)get_random_int() % (1<<28); + rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } -- cgit v1.2.3 From 185a383ada2e7794b0e82e040223e741b24d2bf8 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 12 Jan 2016 13:18:10 -0700 Subject: x86/PCI: Add driver for Intel Volume Management Device (VMD) The Intel Volume Management Device (VMD) is a Root Complex Integrated Endpoint that acts as a host bridge to a secondary PCIe domain. BIOS can reassign one or more Root Ports to appear within a VMD domain instead of the primary domain. The immediate benefit is that additional PCIe domains allow more than 256 buses in a system by letting bus numbers be reused across different domains. VMD domains do not define ACPI _SEG, so to avoid domain clashing with host bridges defining this segment, VMD domains start at 0x10000, which is greater than the highest possible 16-bit ACPI defined _SEG. This driver enumerates and enables the domain using the root bus configuration interface provided by the PCI subsystem. The driver provides configuration space accessor functions (pci_ops), bus and memory resources, an MSI IRQ domain with irq_chip implementation, and DMA operations necessary to use devices through the VMD endpoint's interface. VMD routes I/O as follows: 1) Configuration Space: BAR 0 ("CFGBAR") of VMD provides the base address and size for configuration space register access to VMD-owned root ports. It works similarly to MMCONFIG for extended configuration space. Bus numbering is independent and does not conflict with the primary domain. 2) MMIO Space: BARs 2 and 4 ("MEMBAR1" and "MEMBAR2") of VMD provide the base address, size, and type for MMIO register access. 
These addresses are not translated by VMD hardware; they are simply reservations to be distributed to root ports' memory base/limit registers and subdivided among devices downstream. 3) DMA: To interact appropriately with an IOMMU, the source IDs of DMA read and write requests are translated to the bus-device-function of the VMD endpoint. Otherwise, DMA operates normally without VMD-specific address translation. 4) Interrupts: Part of VMD's BAR 4 is reserved for VMD's MSI-X Table and PBA. MSIs from VMD domain devices and ports are remapped to appear as if they were issued using one of VMD's MSI-X table entries. Each MSI and MSI-X address of VMD-owned devices and ports has a special format where the address refers to specific entries in the VMD's MSI-X table. As with DMA, the interrupt source ID is translated to VMD's bus-device-function. The driver provides its own MSI and MSI-X configuration functions specific to how MSI messages are used within the VMD domain, and provides an irq_chip for independent IRQ allocation to relay interrupts from VMD's interrupt handler to the appropriate device driver's handler. 5) Errors: PCIe error messages are intercepted by the root ports normally (e.g., AER), except that with VMD, system errors (i.e., firmware first) are disabled by default. AER and hotplug interrupts are translated in the same way as endpoint interrupts. 6) VMD does not support INTx interrupts or IO ports. Devices or drivers requiring these features should either not be placed below VMD-owned root ports, or VMD should be disabled by BIOS for such endpoints. [bhelgaas: add VMD BAR #defines, factor out vmd_cfg_addr(), rework VMD resource setup, whitespace, changelog] Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas Acked-by: Thomas Gleixner (IRQ-related parts) --- MAINTAINERS | 6 + arch/x86/Kconfig | 13 + arch/x86/include/asm/hw_irq.h | 5 + arch/x86/pci/Makefile | 2 + arch/x86/pci/vmd.c | 723 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 749 insertions(+) create mode 100644 arch/x86/pci/vmd.c (limited to 'arch/x86/Kconfig') diff --git a/MAINTAINERS b/MAINTAINERS index 050d0e77a2cf..ce47e08e3619 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8216,6 +8216,12 @@ S: Maintained F: Documentation/devicetree/bindings/pci/host-generic-pci.txt F: drivers/pci/host/pci-host-generic.c +PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) +M: Keith Busch +L: linux-pci@vger.kernel.org +S: Supported +F: arch/x86/pci/vmd.c + PCIE DRIVER FOR ST SPEAR13XX M: Pratyush Anand L: linux-pci@vger.kernel.org diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index db3622f22b61..3e6aca822295 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2665,6 +2665,19 @@ config PMC_ATOM def_bool y depends on PCI +config VMD + depends on PCI_MSI + tristate "Volume Management Device Driver" + default N + ---help--- + Adds support for the Intel Volume Management Device (VMD). VMD is a + secondary PCI host bridge that allows PCI Express root ports, + and devices attached to them, to be removed from the default + PCI domain and placed within the VMD domain. This provides + more bus resources than are otherwise possible with a + single domain. If you know your system provides one of these and + has devices attached to it, say Y; if you are not sure, say N.
+ source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 1e3408e88604..1815b736269d 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -129,6 +129,11 @@ struct irq_alloc_info { unsigned long uv_offset; char *uv_name; }; +#endif +#if IS_ENABLED(CONFIG_VMD) + struct { + struct msi_desc *desc; + }; #endif }; }; diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index 5c6fc3577a49..97062a635b77 100644 --- a/arch/x86/pci/Makefile +++ b/arch/x86/pci/Makefile @@ -23,6 +23,8 @@ obj-y += bus_numa.o obj-$(CONFIG_AMD_NB) += amd_bus.o obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o +obj-$(CONFIG_VMD) += vmd.o + ifeq ($(CONFIG_PCI_DEBUG),y) EXTRA_CFLAGS += -DDEBUG endif diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c new file mode 100644 index 000000000000..d57e48016f15 --- /dev/null +++ b/arch/x86/pci/vmd.c @@ -0,0 +1,723 @@ +/* + * Volume Management Device driver + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define VMD_CFGBAR 0 +#define VMD_MEMBAR1 2 +#define VMD_MEMBAR2 4 + +/* + * Lock for manipulating VMD IRQ lists. + */ +static DEFINE_RAW_SPINLOCK(list_lock); + +/** + * struct vmd_irq - private data to map driver IRQ to the VMD shared vector + * @node: list item for parent traversal. + * @rcu: RCU callback item for freeing. + * @irq: back pointer to parent. + * @virq: the virtual IRQ value provided to the requesting driver. + * + * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to + * a VMD IRQ using this structure. + */ +struct vmd_irq { + struct list_head node; + struct rcu_head rcu; + struct vmd_irq_list *irq; + unsigned int virq; +}; + +/** + * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector + * @irq_list: the list of irq's the VMD one demuxes to. + * @vmd_vector: the h/w IRQ assigned to the VMD. + * @index: index into the VMD MSI-X table; used for message routing. + * @count: number of child IRQs assigned to this vector; used to track + * sharing. + */ +struct vmd_irq_list { + struct list_head irq_list; + struct vmd_dev *vmd; + unsigned int vmd_vector; + unsigned int index; + unsigned int count; +}; + +struct vmd_dev { + struct pci_dev *dev; + + spinlock_t cfg_lock; + char __iomem *cfgbar; + + int msix_count; + struct msix_entry *msix_entries; + struct vmd_irq_list *irqs; + + struct pci_sysdata sysdata; + struct resource resources[3]; + struct irq_domain *irq_domain; + struct pci_bus *bus; + +#ifdef CONFIG_X86_DEV_DMA_OPS + struct dma_map_ops dma_ops; + struct dma_domain dma_domain; +#endif +}; + +static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) +{ + return container_of(bus->sysdata, struct vmd_dev, sysdata); +} + +/* + * Drivers managing a device in a VMD domain allocate their own IRQs as before, + * but the MSI entry for the hardware it's driving will be programmed with a + * destination ID for the VMD MSI-X table. 
The VMD muxes interrupts in its + * domain into one of its own, and the VMD driver de-muxes these for the + * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations + * and irq_chip to set this up. + */ +static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct vmd_irq *vmdirq = data->chip_data; + struct vmd_irq_list *irq = vmdirq->irq; + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index); + msg->data = 0; +} + +/* + * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. + */ +static void vmd_irq_enable(struct irq_data *data) +{ + struct vmd_irq *vmdirq = data->chip_data; + + raw_spin_lock(&list_lock); + list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); + raw_spin_unlock(&list_lock); + + data->chip->irq_unmask(data); +} + +static void vmd_irq_disable(struct irq_data *data) +{ + struct vmd_irq *vmdirq = data->chip_data; + + data->chip->irq_mask(data); + + raw_spin_lock(&list_lock); + list_del_rcu(&vmdirq->node); + raw_spin_unlock(&list_lock); +} + +/* + * XXX: Stubbed until we develop acceptable way to not create conflicts with + * other devices sharing the same vector. + */ +static int vmd_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force) +{ + return -EINVAL; +} + +static struct irq_chip vmd_msi_controller = { + .name = "VMD-MSI", + .irq_enable = vmd_irq_enable, + .irq_disable = vmd_irq_disable, + .irq_compose_msi_msg = vmd_compose_msi_msg, + .irq_set_affinity = vmd_irq_set_affinity, +}; + +static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return 0; +} + +/* + * XXX: We can be even smarter selecting the best IRQ once we solve the + * affinity problem. 
+ */ +static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd) +{ + int i, best = 0; + + raw_spin_lock(&list_lock); + for (i = 1; i < vmd->msix_count; i++) + if (vmd->irqs[i].count < vmd->irqs[best].count) + best = i; + vmd->irqs[best].count++; + raw_spin_unlock(&list_lock); + + return &vmd->irqs[best]; +} + +static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg) +{ + struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus); + struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + + if (!vmdirq) + return -ENOMEM; + + INIT_LIST_HEAD(&vmdirq->node); + vmdirq->irq = vmd_next_irq(vmd); + vmdirq->virq = virq; + + irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip, + vmdirq, handle_simple_irq, vmd, NULL); + return 0; +} + +static void vmd_msi_free(struct irq_domain *domain, + struct msi_domain_info *info, unsigned int virq) +{ + struct vmd_irq *vmdirq = irq_get_chip_data(virq); + + /* XXX: Potential optimization to rebalance */ + raw_spin_lock(&list_lock); + vmdirq->irq->count--; + raw_spin_unlock(&list_lock); + + kfree_rcu(vmdirq, rcu); +} + +static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = vmd_from_bus(pdev->bus); + + if (nvec > vmd->msix_count) + return vmd->msix_count; + + memset(arg, 0, sizeof(*arg)); + return 0; +} + +static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; +} + +static struct msi_domain_ops vmd_msi_domain_ops = { + .get_hwirq = vmd_get_hwirq, + .msi_init = vmd_msi_init, + .msi_free = vmd_msi_free, + .msi_prepare = vmd_msi_prepare, + .set_desc = vmd_set_desc, +}; + +static struct msi_domain_info vmd_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .ops = &vmd_msi_domain_ops, + .chip = &vmd_msi_controller, +}; + +#ifdef CONFIG_X86_DEV_DMA_OPS +/* + * VMD replaces the requester ID with its own. DMA mappings for devices in a + * VMD domain need to be mapped for the VMD, not the device requiring + * the mapping. 
+ */ +static struct device *to_vmd_dev(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = vmd_from_bus(pdev->bus); + + return &vmd->dev->dev; +} + +static struct dma_map_ops *vmd_dma_ops(struct device *dev) +{ + return to_vmd_dev(dev)->archdata.dma_ops; +} + +static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, + gfp_t flag, struct dma_attrs *attrs) +{ + return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, + attrs); +} + +static void vmd_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t addr, struct dma_attrs *attrs) +{ + return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, + attrs); +} + +static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t addr, size_t size, + struct dma_attrs *attrs) +{ + return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, + size, attrs); +} + +static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t addr, size_t size, + struct dma_attrs *attrs) +{ + return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, + addr, size, attrs); +} + +static dma_addr_t vmd_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, + dir, attrs); +} + +static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); +} + +static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); +} + +static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); +} + +static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); +} + +static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size, + dir); +} + +static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); +} + +static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); +} + +static int vmd_mapping_error(struct device *dev, dma_addr_t addr) +{ + return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr); +} + +static int vmd_dma_supported(struct device *dev, u64 mask) +{ + return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); +} + +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK +static u64 vmd_get_required_mask(struct device *dev) +{ + return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); +} +#endif + +static void vmd_teardown_dma_ops(struct vmd_dev *vmd) +{ + struct dma_domain *domain = &vmd->dma_domain; + + if (vmd->dev->dev.archdata.dma_ops) + del_dma_domain(domain); +} + +#define 
ASSIGN_VMD_DMA_OPS(source, dest, fn) \ + do { \ + if (source->fn) \ + dest->fn = vmd_##fn; \ + } while (0) + +static void vmd_setup_dma_ops(struct vmd_dev *vmd) +{ + const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops; + struct dma_map_ops *dest = &vmd->dma_ops; + struct dma_domain *domain = &vmd->dma_domain; + + domain->domain_nr = vmd->sysdata.domain; + domain->dma_ops = dest; + + if (!source) + return; + ASSIGN_VMD_DMA_OPS(source, dest, alloc); + ASSIGN_VMD_DMA_OPS(source, dest, free); + ASSIGN_VMD_DMA_OPS(source, dest, mmap); + ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); + ASSIGN_VMD_DMA_OPS(source, dest, map_page); + ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); + ASSIGN_VMD_DMA_OPS(source, dest, map_sg); + ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); + ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu); + ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); + ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); + ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); + ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); + ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK + ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); +#endif + add_dma_domain(domain); +} +#undef ASSIGN_VMD_DMA_OPS +#else +static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {} +static void vmd_setup_dma_ops(struct vmd_dev *vmd) {} +#endif + +static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, + unsigned int devfn, int reg, int len) +{ + char __iomem *addr = vmd->cfgbar + + (bus->number << 20) + (devfn << 12) + reg; + + if ((addr - vmd->cfgbar) + len >= + resource_size(&vmd->dev->resource[VMD_CFGBAR])) + return NULL; + + return addr; +} + +/* + * CPU may deadlock if config space is not serialized on some versions of this + * hardware, so all config space access is done under a spinlock. + */ +static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, + int len, u32 *value) +{ + struct vmd_dev *vmd = vmd_from_bus(bus); + char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); + unsigned long flags; + int ret = 0; + + if (!addr) + return -EFAULT; + + spin_lock_irqsave(&vmd->cfg_lock, flags); + switch (len) { + case 1: + *value = readb(addr); + break; + case 2: + *value = readw(addr); + break; + case 4: + *value = readl(addr); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&vmd->cfg_lock, flags); + return ret; +} + +/* + * VMD h/w converts non-posted config writes to posted memory writes. The + * read-back in this function forces the completion so it returns only after + * the config space was written, as expected. + */ +static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, + int len, u32 value) +{ + struct vmd_dev *vmd = vmd_from_bus(bus); + char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); + unsigned long flags; + int ret = 0; + + if (!addr) + return -EFAULT; + + spin_lock_irqsave(&vmd->cfg_lock, flags); + switch (len) { + case 1: + writeb(value, addr); + readb(addr); + break; + case 2: + writew(value, addr); + readw(addr); + break; + case 4: + writel(value, addr); + readl(addr); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&vmd->cfg_lock, flags); + return ret; +} + +static struct pci_ops vmd_ops = { + .read = vmd_pci_read, + .write = vmd_pci_write, +}; + +/* + * VMD domains start at 0x1000 to not clash with ACPI _SEG domains. 
+ */ +static int vmd_find_free_domain(void) +{ + int domain = 0xffff; + struct pci_bus *bus = NULL; + + while ((bus = pci_find_next_bus(bus)) != NULL) + domain = max_t(int, domain, pci_domain_nr(bus)); + return domain + 1; +} + +static int vmd_enable_domain(struct vmd_dev *vmd) +{ + struct pci_sysdata *sd = &vmd->sysdata; + struct resource *res; + u32 upper_bits; + unsigned long flags; + LIST_HEAD(resources); + + res = &vmd->dev->resource[VMD_CFGBAR]; + vmd->resources[0] = (struct resource) { + .name = "VMD CFGBAR", + .start = res->start, + .end = (resource_size(res) >> 20) - 1, + .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, + }; + + res = &vmd->dev->resource[VMD_MEMBAR1]; + upper_bits = upper_32_bits(res->end); + flags = res->flags & ~IORESOURCE_SIZEALIGN; + if (!upper_bits) + flags &= ~IORESOURCE_MEM_64; + vmd->resources[1] = (struct resource) { + .name = "VMD MEMBAR1", + .start = res->start, + .end = res->end, + .flags = flags, + }; + + res = &vmd->dev->resource[VMD_MEMBAR2]; + upper_bits = upper_32_bits(res->end); + flags = res->flags & ~IORESOURCE_SIZEALIGN; + if (!upper_bits) + flags &= ~IORESOURCE_MEM_64; + vmd->resources[2] = (struct resource) { + .name = "VMD MEMBAR2", + .start = res->start + 0x2000, + .end = res->end, + .flags = flags, + }; + + sd->domain = vmd_find_free_domain(); + if (sd->domain < 0) + return sd->domain; + + sd->node = pcibus_to_node(vmd->dev->bus); + + vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info, + NULL); + if (!vmd->irq_domain) + return -ENODEV; + + pci_add_resource(&resources, &vmd->resources[0]); + pci_add_resource(&resources, &vmd->resources[1]); + pci_add_resource(&resources, &vmd->resources[2]); + vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd, + &resources); + if (!vmd->bus) { + pci_free_resource_list(&resources); + irq_domain_remove(vmd->irq_domain); + return -ENODEV; + } + + vmd_setup_dma_ops(vmd); + dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); + pci_rescan_bus(vmd->bus); + + WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, + "domain"), "Can't create symlink to domain\n"); + return 0; +} + +static irqreturn_t vmd_irq(int irq, void *data) +{ + struct vmd_irq_list *irqs = data; + struct vmd_irq *vmdirq; + + rcu_read_lock(); + list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) + generic_handle_irq(vmdirq->virq); + rcu_read_unlock(); + + return IRQ_HANDLED; +} + +static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct vmd_dev *vmd; + int i, err; + + if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) + return -ENOMEM; + + vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); + if (!vmd) + return -ENOMEM; + + vmd->dev = dev; + err = pcim_enable_device(dev); + if (err < 0) + return err; + + vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); + if (!vmd->cfgbar) + return -ENOMEM; + + pci_set_master(dev); + if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) + return -ENODEV; + + vmd->msix_count = pci_msix_vec_count(dev); + if (vmd->msix_count < 0) + return -ENODEV; + + vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), + GFP_KERNEL); + if (!vmd->irqs) + return -ENOMEM; + + vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count, + sizeof(*vmd->msix_entries), + GFP_KERNEL); + if (!vmd->msix_entries) + return -ENOMEM; + for (i = 0; i < vmd->msix_count; i++) + vmd->msix_entries[i].entry = i; + + vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 
1, + vmd->msix_count); + if (vmd->msix_count < 0) + return vmd->msix_count; + + for (i = 0; i < vmd->msix_count; i++) { + INIT_LIST_HEAD(&vmd->irqs[i].irq_list); + vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector; + vmd->irqs[i].index = i; + + err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector, + vmd_irq, 0, "vmd", &vmd->irqs[i]); + if (err) + return err; + } + + spin_lock_init(&vmd->cfg_lock); + pci_set_drvdata(dev, vmd); + err = vmd_enable_domain(vmd); + if (err) + return err; + + dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", + vmd->sysdata.domain); + return 0; +} + +static void vmd_remove(struct pci_dev *dev) +{ + struct vmd_dev *vmd = pci_get_drvdata(dev); + + pci_set_drvdata(dev, NULL); + sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); + pci_stop_root_bus(vmd->bus); + pci_remove_root_bus(vmd->bus); + vmd_teardown_dma_ops(vmd); + irq_domain_remove(vmd->irq_domain); +} + +#ifdef CONFIG_PM +static int vmd_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_save_state(pdev); + return 0; +} + +static int vmd_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_restore_state(pdev); + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); + +static const struct pci_device_id vmd_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, vmd_ids); + +static struct pci_driver vmd_drv = { + .name = "vmd", + .id_table = vmd_ids, + .probe = vmd_probe, + .remove = vmd_remove, + .driver = { + .pm = &vmd_dev_pm_ops, + }, +}; +module_pci_driver(vmd_drv); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.6"); -- cgit v1.2.3 From da48d094ce5d7c7dcdad9011648a81c42fd1c2ef Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 Jan 2016 16:58:07 -0800 Subject: Kconfig: remove HAVE_LATENCYTOP_SUPPORT As illustrated by commit a3afe70b83fd ("[S390] latencytop s390 support."), HAVE_LATENCYTOP_SUPPORT is defined by an architecture to advertise an implementation of save_stack_trace_tsk. However, as of 9212ddb5eada ("stacktrace: provide save_stack_trace_tsk() weak alias") a dummy implementation is provided if STACKTRACE=y. Given that LATENCYTOP already depends on STACKTRACE_SUPPORT and selects STACKTRACE, we can remove HAVE_LATENCYTOP_SUPPORT altogether. Signed-off-by: Will Deacon Acked-by: Heiko Carstens Cc: Vineet Gupta Cc: Russell King Cc: James Hogan Cc: Michal Simek Cc: Helge Deller Acked-by: Michael Ellerman Cc: "David S. 
Miller" Cc: Guan Xuetao Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arc/Kconfig | 3 --- arch/arm/Kconfig | 5 ----- arch/metag/Kconfig | 3 --- arch/microblaze/Kconfig | 3 --- arch/parisc/Kconfig | 3 --- arch/powerpc/Kconfig | 3 --- arch/s390/Kconfig | 3 --- arch/sh/Kconfig | 3 --- arch/sparc/Kconfig | 4 ---- arch/unicore32/Kconfig | 3 --- arch/x86/Kconfig | 3 --- lib/Kconfig.debug | 1 - 12 files changed, 37 deletions(-) (limited to 'arch/x86/Kconfig') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 6312f607932f..76dde9db7934 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -73,9 +73,6 @@ config STACKTRACE_SUPPORT def_bool y select STACKTRACE -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config HAVE_ARCH_TRANSPARENT_HUGEPAGE def_bool y depends on ARC_MMU_V4 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4e489cc5c45e..6a889afa6a2c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -168,11 +168,6 @@ config STACKTRACE_SUPPORT bool default y -config HAVE_LATENCYTOP_SUPPORT - bool - depends on !SMP - default y - config LOCKDEP_SUPPORT bool default y diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 0b389a81c43a..a0fa88da3e31 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -36,9 +36,6 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config RWSEM_GENERIC_SPINLOCK def_bool y diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 0bce820428fc..5ecd0287a874 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -67,9 +67,6 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 729f89163bc3..7c34cafdf301 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -79,9 +79,6 @@ config TIME_LOW_RES depends on SMP default y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - # unless you want to implement ACPI on PA-RISC ... 
;-) config PM bool diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 7d5a8350f913..94f6c5089e0c 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -47,9 +47,6 @@ config STACKTRACE_SUPPORT bool default y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config TRACE_IRQFLAGS_SUPPORT bool default y diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 24490344c30f..dbeeb3a049f2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -10,9 +10,6 @@ config LOCKDEP_SUPPORT config STACKTRACE_SUPPORT def_bool y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config RWSEM_GENERIC_SPINLOCK bool diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index d514df7e04dd..6c391a5d3e5c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -130,9 +130,6 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config ARCH_HAS_ILOG2_U32 def_bool n diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 56442d2d7bbc..3203e42190dd 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -101,10 +101,6 @@ config LOCKDEP_SUPPORT bool default y if SPARC64 -config HAVE_LATENCYTOP_SUPPORT - bool - default y if SPARC64 - config ARCH_HIBERNATION_POSSIBLE def_bool y if SPARC64 diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 5dc4c0a43ccd..877342640b6e 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -34,9 +34,6 @@ config NO_IOPORT_MAP config STACKTRACE_SUPPORT def_bool y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config LOCKDEP_SUPPORT def_bool y diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 24f362bf3ec6..4a10ba9e95da 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -180,9 +180,6 @@ config LOCKDEP_SUPPORT config STACKTRACE_SUPPORT def_bool y -config HAVE_LATENCYTOP_SUPPORT - def_bool y - config MMU def_bool y diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8fbdef1980a5..f75a33f29f6e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1597,7 +1597,6 @@ config FAULT_INJECTION_STACKTRACE_FILTER config LATENCYTOP bool "Latency measuring infrastructure" - depends on HAVE_LATENCYTOP_SUPPORT depends on DEBUG_KERNEL depends on STACKTRACE_SUPPORT depends on PROC_FS -- cgit v1.2.3 From 3fda5bb420e79b357328b358409e4c547d8f0a18 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 15 Jan 2016 22:11:07 +0200 Subject: x86/platform/intel-mid: Enable 64-bit build Intel Tangier SoC is known to have 64-bit dual core CPU. Enable 64-bit build for it. The kernel has been tested on Intel Edison board: Linux buildroot 4.4.0-next-20160115+ #25 SMP Fri Jan 15 22:03:19 EET 2016 x86_64 GNU/Linux processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 74 model name : Genuine Intel(R) CPU 4000 @ 500MHz stepping : 8 Signed-off-by: Andy Shevchenko Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Mika Westerberg Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1452888668-147116-1-git-send-email-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 +-- arch/x86/kernel/head64.c | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/x86/Kconfig') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 258965d56beb..07459a6b417d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -495,11 +495,10 @@ config X86_INTEL_CE config X86_INTEL_MID bool "Intel MID platform support" - depends on X86_32 depends on X86_EXTENDED_PLATFORM depends on X86_PLATFORM_DEVICES depends on PCI - depends on PCI_GOANY + depends on X86_64 || (PCI_GOANY && X86_32) depends on X86_IO_APIC select SFI select I2C diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index f129a9af6357..2c0f3407bd1f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data) reserve_ebda_region(); + switch (boot_params.hdr.hardware_subarch) { + case X86_SUBARCH_INTEL_MID: + x86_intel_mid_early_setup(); + break; + default: + break; + } + start_kernel(); } -- cgit v1.2.3 From c6d308534aef6c99904bf5862066360ae067abc4 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Wed, 20 Jan 2016 15:00:55 -0800 Subject: UBSAN: run-time undefined behavior sanity checker UBSAN uses compile-time instrumentation to catch undefined behavior (UB). Compiler inserts code that perform certain kinds of checks before operations that could cause UB. If check fails (i.e. UB detected) __ubsan_handle_* function called to print error message. So the most of the work is done by compiler. This patch just implements ubsan handlers printing errors. GCC has this capability since 4.9.x [1] (see -fsanitize=undefined option and its suboptions). However GCC 5.x has more checkers implemented [2]. Article [3] has a bit more details about UBSAN in the GCC. [1] - https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html [2] - https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html [3] - http://developerblog.redhat.com/2014/10/16/gcc-undefined-behavior-sanitizer-ubsan/ Issues which UBSAN has found thus far are: Found bugs: * out-of-bounds access - 97840cb67ff5 ("netfilter: nfnetlink: fix insufficient validation in nfnetlink_bind") undefined shifts: * d48458d4a768 ("jbd2: use a better hash function for the revoke table") * 10632008b9e1 ("clockevents: Prevent shift out of bounds") * 'x << -1' shift in ext4 - http://lkml.kernel.org/r/<5444EF21.8020501@samsung.com> * undefined rol32(0) - http://lkml.kernel.org/r/<1449198241-20654-1-git-send-email-sasha.levin@oracle.com> * undefined dirty_ratelimit calculation - http://lkml.kernel.org/r/<566594E2.3050306@odin.com> * undefined roundown_pow_of_two(0) - http://lkml.kernel.org/r/<1449156616-11474-1-git-send-email-sasha.levin@oracle.com> * [WONTFIX] undefined shift in __bpf_prog_run - http://lkml.kernel.org/r/ WONTFIX here because it should be fixed in bpf program, not in kernel. 
signed overflows: * 32a8df4e0b33f ("sched: Fix odd values in effective_load() calculations") * mul overflow in ntp - http://lkml.kernel.org/r/<1449175608-1146-1-git-send-email-sasha.levin@oracle.com> * incorrect conversion into rtc_time in rtc_time64_to_tm() - http://lkml.kernel.org/r/<1449187944-11730-1-git-send-email-sasha.levin@oracle.com> * unvalidated timespec in io_getevents() - http://lkml.kernel.org/r/ * [NOTABUG] signed overflow in ktime_add_safe() - http://lkml.kernel.org/r/ [akpm@linux-foundation.org: fix unused local warning] [akpm@linux-foundation.org: fix __int128 build woes] Signed-off-by: Andrey Ryabinin Cc: Peter Zijlstra Cc: Sasha Levin Cc: Randy Dunlap Cc: Rasmus Villemoes Cc: Jonathan Corbet Cc: Michal Marek Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Yury Gribov Cc: Dmitry Vyukov Cc: Konstantin Khlebnikov Cc: Kostya Serebryany Cc: Johannes Berg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ubsan.txt | 84 +++++++ Makefile | 3 +- arch/x86/Kconfig | 1 + arch/x86/boot/Makefile | 1 + arch/x86/boot/compressed/Makefile | 1 + arch/x86/entry/vdso/Makefile | 1 + arch/x86/realmode/rm/Makefile | 1 + drivers/firmware/efi/libstub/Makefile | 1 + include/linux/sched.h | 3 + lib/Kconfig.debug | 2 + lib/Kconfig.ubsan | 29 +++ lib/Makefile | 3 + lib/ubsan.c | 456 ++++++++++++++++++++++++++++++++++ lib/ubsan.h | 84 +++++++ mm/kasan/Makefile | 1 + scripts/Makefile.lib | 6 + scripts/Makefile.ubsan | 17 ++ 17 files changed, 693 insertions(+), 1 deletion(-) create mode 100644 Documentation/ubsan.txt create mode 100644 lib/Kconfig.ubsan create mode 100644 lib/ubsan.c create mode 100644 lib/ubsan.h create mode 100644 scripts/Makefile.ubsan (limited to 'arch/x86/Kconfig') diff --git a/Documentation/ubsan.txt b/Documentation/ubsan.txt new file mode 100644 index 000000000000..f58215ef5797 --- /dev/null +++ b/Documentation/ubsan.txt @@ -0,0 +1,84 @@ +Undefined Behavior Sanitizer - UBSAN + +Overview +-------- + +UBSAN is a runtime undefined behaviour checker. + +UBSAN uses compile-time instrumentation to catch undefined behavior (UB). +Compiler inserts code that perform certain kinds of checks before operations +that may cause UB. If check fails (i.e. UB detected) __ubsan_handle_* +function called to print error message. + +GCC has that feature since 4.9.x [1] (see -fsanitize=undefined option and +its suboptions). GCC 5.x has more checkers implemented [2]. + +Report example +--------------- + + ================================================================================ + UBSAN: Undefined behaviour in ../include/linux/bitops.h:110:33 + shift exponent 32 is to large for 32-bit type 'unsigned int' + CPU: 0 PID: 0 Comm: swapper Not tainted 4.4.0-rc1+ #26 + 0000000000000000 ffffffff82403cc8 ffffffff815e6cd6 0000000000000001 + ffffffff82403cf8 ffffffff82403ce0 ffffffff8163a5ed 0000000000000020 + ffffffff82403d78 ffffffff8163ac2b ffffffff815f0001 0000000000000002 + Call Trace: + [] dump_stack+0x45/0x5f + [] ubsan_epilogue+0xd/0x40 + [] __ubsan_handle_shift_out_of_bounds+0xeb/0x130 + [] ? radix_tree_gang_lookup_slot+0x51/0x150 + [] _mix_pool_bytes+0x1e6/0x480 + [] ? dmi_walk_early+0x48/0x5c + [] add_device_randomness+0x61/0x130 + [] ? dmi_save_one_device+0xaa/0xaa + [] dmi_walk_early+0x48/0x5c + [] dmi_scan_machine+0x278/0x4b4 + [] ? vprintk_default+0x1a/0x20 + [] ? early_idt_handler_array+0x120/0x120 + [] setup_arch+0x405/0xc2c + [] ? early_idt_handler_array+0x120/0x120 + [] start_kernel+0x83/0x49a + [] ? 
early_idt_handler_array+0x120/0x120 + [] x86_64_start_reservations+0x2a/0x2c + [] x86_64_start_kernel+0x16b/0x17a + ================================================================================ + +Usage +----- + +To enable UBSAN configure kernel with: + + CONFIG_UBSAN=y + +and to check the entire kernel: + + CONFIG_UBSAN_SANITIZE_ALL=y + +To enable instrumentation for specific files or directories, add a line +similar to the following to the respective kernel Makefile: + + For a single file (e.g. main.o): + UBSAN_SANITIZE_main.o := y + + For all files in one directory: + UBSAN_SANITIZE := y + +To exclude files from being instrumented even if +CONFIG_UBSAN_SANITIZE_ALL=y, use: + + UBSAN_SANITIZE_main.o := n + and: + UBSAN_SANITIZE := n + +Detection of unaligned accesses controlled through the separate option - +CONFIG_UBSAN_ALIGNMENT. It's off by default on architectures that support +unaligned accesses (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y). One could +still enable it in config, just note that it will produce a lot of UBSAN +reports. + +References +---------- + +[1] - https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html +[2] - https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html diff --git a/Makefile b/Makefile index 7f4ac1ee4a2b..abfb3e8eb0b1 100644 --- a/Makefile +++ b/Makefile @@ -411,7 +411,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL @@ -784,6 +784,7 @@ endif include scripts/Makefile.kasan include scripts/Makefile.extrawarn +include scripts/Makefile.ubsan # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the # last assignments diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4a10ba9e95da..92b2a73162ee 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -31,6 +31,7 @@ config X86 select ARCH_HAS_PMEM_API if X86_64 select ARCH_HAS_MMIO_FLUSH select ARCH_HAS_SG_CHAIN + select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI select ARCH_MIGHT_HAVE_PC_PARPORT diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 2ee62dba0373..bbe1a62efc02 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -60,6 +60,7 @@ clean-files += cpustr.h KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n +UBSAN_SANITIZE := n $(obj)/bzImage: asflags-y := $(SVGA_MODE) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 0a291cdfaf77..f9ce75d80101 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -33,6 +33,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n +UBSAN_SANITIZE :=n LDFLAGS := -m elf_$(UTS_MACHINE) LDFLAGS_vmlinux := -T diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 265c0ed68118..c854541d93ff 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -4,6 +4,7 @@ KBUILD_CFLAGS += $(DISABLE_LTO) KASAN_SANITIZE := n 
+UBSAN_SANITIZE := n VDSO64-$(CONFIG_X86_64) := y VDSOX32-$(CONFIG_X86_X32_ABI) := y diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 2730d775ef9a..3e75fcf6b836 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -70,3 +70,4 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \ -I$(srctree)/arch/x86/boot KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n +UBSAN_SANITIZE := n diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 9c12e18031d5..aaf9c0bab42e 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -22,6 +22,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ GCOV_PROFILE := n KASAN_SANITIZE := n +UBSAN_SANITIZE := n lib-y := efi-stub-helper.o diff --git a/include/linux/sched.h b/include/linux/sched.h index 61aa9bbea871..02dabf281b2f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1643,6 +1643,9 @@ struct task_struct { struct held_lock held_locks[MAX_LOCK_DEPTH]; gfp_t lockdep_reclaim_gfp; #endif +#ifdef CONFIG_UBSAN + unsigned int in_ubsan; +#endif /* journalling filesystem info */ void *journal_info; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f75a33f29f6e..157220b9ff05 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1893,6 +1893,8 @@ source "samples/Kconfig" source "lib/Kconfig.kgdb" +source "lib/Kconfig.ubsan" + config ARCH_HAS_DEVMEM_IS_ALLOWED bool diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan new file mode 100644 index 000000000000..49518fb48cab --- /dev/null +++ b/lib/Kconfig.ubsan @@ -0,0 +1,29 @@ +config ARCH_HAS_UBSAN_SANITIZE_ALL + bool + +config UBSAN + bool "Undefined behaviour sanity checker" + help + This option enables undefined behaviour sanity checker + Compile-time instrumentation is used to detect various undefined + behaviours in runtime. Various types of checks may be enabled + via boot parameter ubsan_handle (see: Documentation/ubsan.txt). + +config UBSAN_SANITIZE_ALL + bool "Enable instrumentation for the entire kernel" + depends on UBSAN + depends on ARCH_HAS_UBSAN_SANITIZE_ALL + default y + help + This option activates instrumentation for the entire kernel. + If you don't enable this option, you have to explicitly specify + UBSAN_SANITIZE := y for the files/directories you want to check for UB. + +config UBSAN_ALIGNMENT + bool "Enable checking of pointers alignment" + depends on UBSAN + default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS + help + This option enables detection of unaligned memory accesses. + Enabling this option on architectures that support unalligned + accesses may produce a lot of false positives. diff --git a/lib/Makefile b/lib/Makefile index b2a82e600987..2d4bc33d09b4 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -209,3 +209,6 @@ quiet_cmd_build_OID_registry = GEN $@ clean-files += oid_registry_data.c obj-$(CONFIG_UCS2_STRING) += ucs2_string.o +obj-$(CONFIG_UBSAN) += ubsan.o + +UBSAN_SANITIZE_ubsan.o := n diff --git a/lib/ubsan.c b/lib/ubsan.c new file mode 100644 index 000000000000..8799ae5e2e42 --- /dev/null +++ b/lib/ubsan.c @@ -0,0 +1,456 @@ +/* + * UBSAN error reporting functions + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ubsan.h" + +const char *type_check_kinds[] = { + "load of", + "store to", + "reference binding to", + "member access within", + "member call on", + "constructor call on", + "downcast of", + "downcast of" +}; + +#define REPORTED_BIT 31 + +#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN) +#define COLUMN_MASK (~(1U << REPORTED_BIT)) +#define LINE_MASK (~0U) +#else +#define COLUMN_MASK (~0U) +#define LINE_MASK (~(1U << REPORTED_BIT)) +#endif + +#define VALUE_LENGTH 40 + +static bool was_reported(struct source_location *location) +{ + return test_and_set_bit(REPORTED_BIT, &location->reported); +} + +static void print_source_location(const char *prefix, + struct source_location *loc) +{ + pr_err("%s %s:%d:%d\n", prefix, loc->file_name, + loc->line & LINE_MASK, loc->column & COLUMN_MASK); +} + +static bool suppress_report(struct source_location *loc) +{ + return current->in_ubsan || was_reported(loc); +} + +static bool type_is_int(struct type_descriptor *type) +{ + return type->type_kind == type_kind_int; +} + +static bool type_is_signed(struct type_descriptor *type) +{ + WARN_ON(!type_is_int(type)); + return type->type_info & 1; +} + +static unsigned type_bit_width(struct type_descriptor *type) +{ + return 1 << (type->type_info >> 1); +} + +static bool is_inline_int(struct type_descriptor *type) +{ + unsigned inline_bits = sizeof(unsigned long)*8; + unsigned bits = type_bit_width(type); + + WARN_ON(!type_is_int(type)); + + return bits <= inline_bits; +} + +static s_max get_signed_val(struct type_descriptor *type, unsigned long val) +{ + if (is_inline_int(type)) { + unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); + return ((s_max)val) << extra_bits >> extra_bits; + } + + if (type_bit_width(type) == 64) + return *(s64 *)val; + + return *(s_max *)val; +} + +static bool val_is_negative(struct type_descriptor *type, unsigned long val) +{ + return type_is_signed(type) && get_signed_val(type, val) < 0; +} + +static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) +{ + if (is_inline_int(type)) + return val; + + if (type_bit_width(type) == 64) + return *(u64 *)val; + + return *(u_max *)val; +} + +static void val_to_string(char *str, size_t size, struct type_descriptor *type, + unsigned long value) +{ + if (type_is_int(type)) { + if (type_bit_width(type) == 128) { +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + u_max val = get_unsigned_val(type, value); + + scnprintf(str, size, "0x%08x%08x%08x%08x", + (u32)(val >> 96), + (u32)(val >> 64), + (u32)(val >> 32), + (u32)(val)); +#else + WARN_ON(1); +#endif + } else if (type_is_signed(type)) { + scnprintf(str, size, "%lld", + (s64)get_signed_val(type, value)); + } else { + scnprintf(str, size, "%llu", + (u64)get_unsigned_val(type, value)); + } + } +} + +static bool location_is_valid(struct source_location *loc) +{ + return loc->file_name != NULL; +} + +static DEFINE_SPINLOCK(report_lock); + +static void ubsan_prologue(struct source_location *location, + unsigned long *flags) +{ + current->in_ubsan++; + spin_lock_irqsave(&report_lock, *flags); + + pr_err("========================================" + "========================================\n"); + print_source_location("UBSAN: Undefined behaviour in", location); +} + +static void ubsan_epilogue(unsigned long *flags) +{ + dump_stack(); + pr_err("========================================" + "========================================\n"); + 
spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; +} + +static void handle_overflow(struct overflow_data *data, unsigned long lhs, + unsigned long rhs, char op) +{ + + struct type_descriptor *type = data->type; + unsigned long flags; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); + pr_err("%s integer overflow:\n", + type_is_signed(type) ? "signed" : "unsigned"); + pr_err("%s %c %s cannot be represented in type %s\n", + lhs_val_str, + op, + rhs_val_str, + type->type_name); + + ubsan_epilogue(&flags); +} + +void __ubsan_handle_add_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + + handle_overflow(data, lhs, rhs, '+'); +} +EXPORT_SYMBOL(__ubsan_handle_add_overflow); + +void __ubsan_handle_sub_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + handle_overflow(data, lhs, rhs, '-'); +} +EXPORT_SYMBOL(__ubsan_handle_sub_overflow); + +void __ubsan_handle_mul_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + handle_overflow(data, lhs, rhs, '*'); +} +EXPORT_SYMBOL(__ubsan_handle_mul_overflow); + +void __ubsan_handle_negate_overflow(struct overflow_data *data, + unsigned long old_val) +{ + unsigned long flags; + char old_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + + pr_err("negation of %s cannot be represented in type %s:\n", + old_val_str, data->type->type_name); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + + +void __ubsan_handle_divrem_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + unsigned long flags; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); + + if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1) + pr_err("division of %s by -1 cannot be represented in type %s\n", + rhs_val_str, data->type->type_name); + else + pr_err("division by zero\n"); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); + +static void handle_null_ptr_deref(struct type_mismatch_data *data) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + pr_err("%s null pointer of type %s\n", + type_check_kinds[data->type_check_kind], + data->type->type_name); + + ubsan_epilogue(&flags); +} + +static void handle_missaligned_access(struct type_mismatch_data *data, + unsigned long ptr) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + pr_err("%s misaligned address %p for type %s\n", + type_check_kinds[data->type_check_kind], + (void *)ptr, data->type->type_name); + pr_err("which requires %ld byte alignment\n", data->alignment); + + ubsan_epilogue(&flags); +} + +static void handle_object_size_mismatch(struct type_mismatch_data *data, + unsigned long ptr) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + pr_err("%s address %pk with 
insufficient space\n", + type_check_kinds[data->type_check_kind], + (void *) ptr); + pr_err("for an object of type %s\n", data->type->type_name); + ubsan_epilogue(&flags); +} + +void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, + unsigned long ptr) +{ + + if (!ptr) + handle_null_ptr_deref(data); + else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) + handle_missaligned_access(data, ptr); + else + handle_object_size_mismatch(data, ptr); +} +EXPORT_SYMBOL(__ubsan_handle_type_mismatch); + +void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + pr_err("null pointer returned from function declared to never return null\n"); + + if (location_is_valid(&data->attr_location)) + print_source_location("returns_nonnull attribute specified in", + &data->attr_location); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_nonnull_return); + +void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, + unsigned long bound) +{ + unsigned long flags; + char bound_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(bound_str, sizeof(bound_str), data->type, bound); + pr_err("variable length array bound value %s <= 0\n", bound_str); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); + +void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, + unsigned long index) +{ + unsigned long flags; + char index_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(index_str, sizeof(index_str), data->index_type, index); + pr_err("index %s is out of range for type %s\n", index_str, + data->array_type->type_name); + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); + +void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + unsigned long lhs, unsigned long rhs) +{ + unsigned long flags; + struct type_descriptor *rhs_type = data->rhs_type; + struct type_descriptor *lhs_type = data->lhs_type; + char rhs_str[VALUE_LENGTH]; + char lhs_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); + + if (val_is_negative(rhs_type, rhs)) + pr_err("shift exponent %s is negative\n", rhs_str); + + else if (get_unsigned_val(rhs_type, rhs) >= + type_bit_width(lhs_type)) + pr_err("shift exponent %s is too large for %u-bit type %s\n", + rhs_str, + type_bit_width(lhs_type), + lhs_type->type_name); + else if (val_is_negative(lhs_type, lhs)) + pr_err("left shift of negative value %s\n", + lhs_str); + else + pr_err("left shift of %s by %s places cannot be" + " represented in type %s\n", + lhs_str, rhs_str, + lhs_type->type_name); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); + + +void __noreturn +__ubsan_handle_builtin_unreachable(struct unreachable_data *data) +{ + unsigned long flags; + + ubsan_prologue(&data->location, &flags); + pr_err("calling __builtin_unreachable()\n"); + ubsan_epilogue(&flags); + panic("can't return from __builtin_unreachable()"); +} +EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); + +void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, + unsigned long 
val) +{ + unsigned long flags; + char val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(val_str, sizeof(val_str), data->type, val); + + pr_err("load of value %s is not a valid value for type %s\n", + val_str, data->type->type_name); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); diff --git a/lib/ubsan.h b/lib/ubsan.h new file mode 100644 index 000000000000..b2d18d4a53f5 --- /dev/null +++ b/lib/ubsan.h @@ -0,0 +1,84 @@ +#ifndef _LIB_UBSAN_H +#define _LIB_UBSAN_H + +enum { + type_kind_int = 0, + type_kind_float = 1, + type_unknown = 0xffff +}; + +struct type_descriptor { + u16 type_kind; + u16 type_info; + char type_name[1]; +}; + +struct source_location { + const char *file_name; + union { + unsigned long reported; + struct { + u32 line; + u32 column; + }; + }; +}; + +struct overflow_data { + struct source_location location; + struct type_descriptor *type; +}; + +struct type_mismatch_data { + struct source_location location; + struct type_descriptor *type; + unsigned long alignment; + unsigned char type_check_kind; +}; + +struct nonnull_arg_data { + struct source_location location; + struct source_location attr_location; + int arg_index; +}; + +struct nonnull_return_data { + struct source_location location; + struct source_location attr_location; +}; + +struct vla_bound_data { + struct source_location location; + struct type_descriptor *type; +}; + +struct out_of_bounds_data { + struct source_location location; + struct type_descriptor *array_type; + struct type_descriptor *index_type; +}; + +struct shift_out_of_bounds_data { + struct source_location location; + struct type_descriptor *lhs_type; + struct type_descriptor *rhs_type; +}; + +struct unreachable_data { + struct source_location location; +}; + +struct invalid_value_data { + struct source_location location; + struct type_descriptor *type; +}; + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) +typedef __int128 s_max; +typedef unsigned __int128 u_max; +#else +typedef s64 s_max; +typedef u64 u_max; +#endif + +#endif diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 64710148941e..a61460d9f5b0 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -1,4 +1,5 @@ KASAN_SANITIZE := n +UBSAN_SANITIZE_kasan.o := n CFLAGS_REMOVE_kasan.o = -pg # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 39d6bb18ce76..2edbcadb3d7f 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -130,6 +130,12 @@ _c_flags += $(if $(patsubst n%,, \ $(CFLAGS_KASAN)) endif +ifeq ($(CONFIG_UBSAN),y) +_c_flags += $(if $(patsubst n%,, \ + $(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \ + $(CFLAGS_UBSAN)) +endif + # If building the kernel in a separate objtree expand all occurrences # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/'). 
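As an aside, a minimal sketch of the kind of code these handlers fire on, assuming a kernel built with CONFIG_UBSAN_SANITIZE_ALL=y and the -fsanitize options added in scripts/Makefile.ubsan below; both functions are hypothetical illustrations, not part of this patch:

	/* Hypothetical examples; UBSAN reports trigger only when the
	 * functions actually run with the offending values. */
	static unsigned int shift_too_far(unsigned int exp)
	{
		/* exp >= 32 exceeds the width of 'unsigned int';
		 * -fsanitize=shift routes this to
		 * __ubsan_handle_shift_out_of_bounds(). */
		return 1U << exp;
	}

	static int signed_add(int a, int b)
	{
		/* a == INT_MAX, b == 1 overflows; with
		 * -fsanitize=signed-integer-overflow the compiler calls
		 * __ubsan_handle_add_overflow() instead of leaving the
		 * result undefined. */
		return a + b;
	}

Calling shift_too_far(32) under this configuration produces a report like the one shown in Documentation/ubsan.txt above.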
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan new file mode 100644 index 000000000000..8ab68679cfb5 --- /dev/null +++ b/scripts/Makefile.ubsan @@ -0,0 +1,17 @@ +ifdef CONFIG_UBSAN + CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=returns-nonnull-attribute) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool) + CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum) + +ifdef CONFIG_UBSAN_ALIGNMENT + CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment) +endif +endif -- cgit v1.2.3 From e1c7e324539ada3b2b13ca2898bcb4948a9ef9db Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 20 Jan 2016 15:02:05 -0800 Subject: dma-mapping: always provide the dma_map_ops based implementation Move the generic implementation to now that all architectures support it and remove the HAVE_DMA_ATTR Kconfig symbol now that everyone supports them. [valentinrothberg@gmail.com: remove leftovers in Kconfig] Signed-off-by: Christoph Hellwig Cc: "David S. Miller" Cc: Aurelien Jacquiot Cc: Chris Metcalf Cc: David Howells Cc: Geert Uytterhoeven Cc: Haavard Skinnemoen Cc: Hans-Christian Egtvedt Cc: Helge Deller Cc: James Hogan Cc: Jesper Nilsson Cc: Koichi Yasutake Cc: Ley Foon Tan Cc: Mark Salter Cc: Mikael Starvik Cc: Steven Miao Cc: Vineet Gupta Cc: Christian Borntraeger Cc: Joerg Roedel Cc: Sebastian Ott Signed-off-by: Valentin Rothberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/DMA-API-HOWTO.txt | 10 - .../features/io/dma_map_attrs/arch-support.txt | 40 --- arch/Kconfig | 3 - arch/alpha/Kconfig | 1 - arch/alpha/include/asm/dma-mapping.h | 2 - arch/arc/Kconfig | 1 - arch/arc/include/asm/dma-mapping.h | 2 - arch/arm/Kconfig | 1 - arch/arm/include/asm/dma-mapping.h | 7 - arch/arm64/Kconfig | 1 - arch/arm64/include/asm/dma-mapping.h | 2 - arch/avr32/Kconfig | 1 - arch/avr32/include/asm/dma-mapping.h | 2 - arch/blackfin/Kconfig | 1 - arch/blackfin/include/asm/dma-mapping.h | 2 - arch/c6x/Kconfig | 1 - arch/c6x/include/asm/dma-mapping.h | 2 - arch/cris/Kconfig | 1 - arch/cris/include/asm/dma-mapping.h | 2 - arch/frv/Kconfig | 1 - arch/frv/include/asm/dma-mapping.h | 2 - arch/h8300/Kconfig | 1 - arch/h8300/include/asm/dma-mapping.h | 2 - arch/hexagon/Kconfig | 1 - arch/hexagon/include/asm/dma-mapping.h | 2 - arch/ia64/Kconfig | 1 - arch/ia64/include/asm/dma-mapping.h | 2 - arch/m68k/Kconfig | 1 - arch/m68k/include/asm/dma-mapping.h | 2 - arch/metag/Kconfig | 1 - arch/metag/include/asm/dma-mapping.h | 2 - arch/microblaze/Kconfig | 1 - arch/microblaze/include/asm/dma-mapping.h | 2 - arch/mips/Kconfig | 1 - arch/mips/include/asm/dma-mapping.h | 2 - arch/mn10300/Kconfig | 1 - arch/mn10300/include/asm/dma-mapping.h | 2 - arch/nios2/Kconfig | 1 - arch/openrisc/Kconfig | 3 - arch/openrisc/include/asm/dma-mapping.h | 2 - arch/parisc/Kconfig | 1 - arch/parisc/include/asm/dma-mapping.h | 2 - arch/powerpc/Kconfig | 1 - arch/powerpc/include/asm/dma-mapping.h | 2 - arch/s390/Kconfig | 1 - arch/s390/include/asm/dma-mapping.h | 2 - arch/sh/Kconfig | 1 - arch/sh/include/asm/dma-mapping.h | 2 - arch/sparc/Kconfig | 1 
- arch/sparc/include/asm/dma-mapping.h | 2 - arch/tile/Kconfig | 1 - arch/tile/include/asm/dma-mapping.h | 3 - arch/unicore32/Kconfig | 1 - arch/unicore32/include/asm/dma-mapping.h | 2 - arch/x86/Kconfig | 1 - arch/x86/include/asm/dma-mapping.h | 2 - arch/xtensa/Kconfig | 1 - arch/xtensa/include/asm/dma-mapping.h | 2 - drivers/gpu/drm/Kconfig | 4 +- drivers/gpu/drm/imx/Kconfig | 2 +- drivers/gpu/drm/rcar-du/Kconfig | 2 +- drivers/gpu/drm/shmobile/Kconfig | 2 +- drivers/gpu/drm/sti/Kconfig | 2 +- drivers/gpu/drm/tilcdc/Kconfig | 2 +- drivers/gpu/drm/vc4/Kconfig | 2 +- drivers/media/platform/Kconfig | 1 - include/asm-generic/dma-mapping-broken.h | 95 ------ include/asm-generic/dma-mapping-common.h | 358 ------------------- include/linux/dma-attrs.h | 10 - include/linux/dma-mapping.h | 379 ++++++++++++++++++++- 70 files changed, 369 insertions(+), 633 deletions(-) delete mode 100644 Documentation/features/io/dma_map_attrs/arch-support.txt delete mode 100644 include/asm-generic/dma-mapping-broken.h delete mode 100644 include/asm-generic/dma-mapping-common.h (limited to 'arch/x86/Kconfig') diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index d69b3fc64e14..781024ef9050 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -951,16 +951,6 @@ to "Closing". alignment constraints (e.g. the alignment constraints about 64-bit objects). -3) Supporting multiple types of IOMMUs - - If your architecture needs to support multiple types of IOMMUs, you - can use include/linux/asm-generic/dma-mapping-common.h. It's a - library to support the DMA API with multiple types of IOMMUs. Lots - of architectures (x86, powerpc, sh, alpha, ia64, microblaze and - sparc) use it. Choose one to see how it can be used. If you need to - support multiple types of IOMMUs in a single system, the example of - x86 or powerpc helps. 
- Closing This document, and the API itself, would not be in its current diff --git a/Documentation/features/io/dma_map_attrs/arch-support.txt b/Documentation/features/io/dma_map_attrs/arch-support.txt deleted file mode 100644 index 51d0f1c02a3e..000000000000 --- a/Documentation/features/io/dma_map_attrs/arch-support.txt +++ /dev/null @@ -1,40 +0,0 @@ -# -# Feature name: dma_map_attrs -# Kconfig: HAVE_DMA_ATTRS -# description: arch provides dma_*map*_attrs() APIs -# - ----------------------- - | arch |status| - ----------------------- - | alpha: | ok | - | arc: | TODO | - | arm: | ok | - | arm64: | ok | - | avr32: | TODO | - | blackfin: | TODO | - | c6x: | TODO | - | cris: | TODO | - | frv: | TODO | - | h8300: | ok | - | hexagon: | ok | - | ia64: | ok | - | m32r: | TODO | - | m68k: | TODO | - | metag: | TODO | - | microblaze: | ok | - | mips: | ok | - | mn10300: | TODO | - | nios2: | TODO | - | openrisc: | ok | - | parisc: | TODO | - | powerpc: | ok | - | s390: | ok | - | score: | TODO | - | sh: | ok | - | sparc: | ok | - | tile: | ok | - | um: | TODO | - | unicore32: | ok | - | x86: | ok | - | xtensa: | TODO | - ----------------------- diff --git a/arch/Kconfig b/arch/Kconfig index 51c03efb4083..f6b649d88ec8 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG config HAVE_ARCH_TRACEHOOK bool -config HAVE_DMA_ATTRS - bool - config HAVE_DMA_CONTIGUOUS bool diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index f515a4dbf7a0..9d8a85801ed1 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -9,7 +9,6 @@ config ALPHA select HAVE_OPROFILE select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS - select HAVE_DMA_ATTRS select VIRT_TO_BUS select GENERIC_IRQ_PROBE select AUTO_IRQ_AFFINITY if SMP diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 72a8ca7796d9..3c3451f58ff4 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } -#include - #define dma_cache_sync(dev, va, size, dir) ((void)0) #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 8150c2783583..76dde9db7934 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -38,7 +38,6 @@ config ARC select OF_EARLY_FLATTREE select PERF_USE_VMALLOC select HAVE_DEBUG_STACKOVERFLOW - select HAVE_DMA_ATTRS config TRACE_IRQFLAGS_SUPPORT def_bool y diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 2a617f9c1e92..660205414f1d 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -18,6 +18,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &arc_dma_ops; } -#include - #endif diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 6a889afa6a2c..52311774e18e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -47,7 +47,6 @@ config ARM select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG - select HAVE_DMA_ATTRS select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index ccb3aa64640d..6ad1ceda62a5 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct 
dma_map_ops *ops) #define HAVE_ARCH_DMA_SUPPORTED 1 extern int dma_supported(struct device *dev, u64 mask); -/* - * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent - * implementations, we don't provide a dma_cache_sync function so drivers using - * this API are highlighted with build warnings. - */ -#include - #ifdef __arch_page_to_dma #error Please update to __arch_pfn_to_dma #endif diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6be3fa2310ee..8cc62289a63e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -64,7 +64,6 @@ config ARM64 select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG - select HAVE_DMA_ATTRS select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 61e08f360e31..ba437f090a74 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -64,8 +64,6 @@ static inline bool is_device_dma_coherent(struct device *dev) return dev->archdata.dma_coherent; } -#include - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { return (dma_addr_t)paddr; diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index aac3d6972c30..b6878eb64884 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -7,7 +7,6 @@ config AVR32 select HAVE_OPROFILE select HAVE_KPROBES select VIRT_TO_BUS - select HAVE_DMA_ATTRS select GENERIC_IRQ_PROBE select GENERIC_ATOMIC64 select HARDIRQS_SW_RESEND diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index 0239ca84eb41..1115f2a645d1 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h @@ -11,6 +11,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &avr32_dma_ops; } -#include - #endif /* __ASM_AVR32_DMA_MAPPING_H */ diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 4be2f905198d..af76634f8d98 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -14,7 +14,6 @@ config BLACKFIN def_bool y select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK - select HAVE_DMA_ATTRS select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index ea5a2e82db7c..3490570aaa82 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -43,6 +43,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &bfin_dma_ops; } -#include - #endif /* _BLACKFIN_DMA_MAPPING_H */ diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 8602f725e270..79049d432d3c 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -18,7 +18,6 @@ config C6X select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select ARCH_NO_COHERENT_DMA_MMAP - select HAVE_DMA_ATTRS config MMU def_bool n diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index f881e425d442..6b5cd7b0cf32 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -24,8 +24,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &c6x_dma_ops; } -#include - extern void coherent_mem_init(u32 start, u32 size); void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 
20d919c93c7f..e086f9e93728 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -54,7 +54,6 @@ config CRIS select GENERIC_ATOMIC64 select HAVE_UID16 select VIRT_TO_BUS - select HAVE_DMA_ATTRS select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_IOMAP diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 34e7c7c7eccb..5a370178a0e9 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -16,8 +16,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) } #endif -#include - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index e3837814f593..eefd9a4ed156 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -16,7 +16,6 @@ config FRV select OLD_SIGACTION select HAVE_DEBUG_STACKOVERFLOW select ARCH_NO_COHERENT_DMA_MMAP - select HAVE_DMA_ATTRS config ZONE_DMA bool diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index 750951cbba88..9a82bfa4303b 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -21,6 +21,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, flush_write_buffers(); } -#include - #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 2e20333cbce9..8c7c82586da0 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -15,7 +15,6 @@ config H8300 select OF_IRQ select OF_EARLY_FLATTREE select HAVE_MEMBLOCK - select HAVE_DMA_ATTRS select CLKSRC_OF select H8300_TMR8 diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index d9b5b806afe6..7ac7fadffed0 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &h8300_dma_map_ops; } -#include - #endif diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 4dc89d1f9c48..57298e7b4867 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -27,7 +27,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select HAVE_DMA_ATTRS ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. 
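What every architecture is left with after this series of deletions is the same small contract: provide a dma_map_ops table and a get_dma_ops() accessor, and the generic wrappers now living in <linux/dma-mapping.h> supply dma_map_single(), dma_map_sg() and friends on top of it. A condensed sketch of that contract, with an assumed arch_dma_ops table standing in for the per-arch instance:

	/* Condensed sketch, not literal kernel code: the ops table each
	 * architecture exposes to the generic dma-mapping layer. */
	struct dma_map_ops {
		dma_addr_t (*map_page)(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       struct dma_attrs *attrs);
		void (*unmap_page)(struct device *dev, dma_addr_t handle,
				   size_t size, enum dma_data_direction dir,
				   struct dma_attrs *attrs);
		/* ... map_sg, sync_single_for_cpu, sync_single_for_device ... */
	};

	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		return &arch_dma_ops;	/* per-arch ops instance (assumed name) */
	}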
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 268fde8a4575..aa6203464520 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -49,8 +49,6 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -#include - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index eb0249e37981..fb0515eb639b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -25,7 +25,6 @@ config IA64 select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE if (!ITANIUM) select HAVE_FUNCTION_TRACER - select HAVE_DMA_ATTRS select TTY select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 9beccf8010bd..d472805edfa9 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, #define get_dma_ops(dev) platform_dma_get_ops(dev) -#include - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index d5d75b3154a1..498b567f007b 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -23,7 +23,6 @@ config M68K select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION - select HAVE_DMA_ATTRS config RWSEM_GENERIC_SPINLOCK bool diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 2c082a63af35..96c536194287 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -8,8 +8,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &m68k_dma_ops; } -#include - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir) { diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index ad8604c2d9f6..a0fa88da3e31 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -29,7 +29,6 @@ config METAG select OF select OF_EARLY_FLATTREE select SPARSE_IRQ - select HAVE_DMA_ATTRS config STACKTRACE_SUPPORT def_bool y diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index 768f2e30236d..27af5d479ce6 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h @@ -8,8 +8,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &metag_dma_ops; } -#include - /* * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to * do any flushing here. 
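Driver code is unaffected by this consolidation; the calls below now resolve to the generic dma_map_ops-based implementation on every architecture instead of only those that selected HAVE_DMA_ATTRS. A usage sketch with made-up names ('example_map', 'buf'):

	#include <linux/dma-mapping.h>

	/* Sketch: map a kernel buffer for a device-bound transfer. */
	static int example_map(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... kick off DMA using 'handle' ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}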
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 5ecd0287a874..53b69deceb99 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -19,7 +19,6 @@ config MICROBLAZE select HAVE_ARCH_KGDB select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG - select HAVE_DMA_ATTRS select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 24b12970c9cf..1884783d15c0 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &dma_direct_ops; } -#include - static inline void __dma_sync(unsigned long paddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 71683a853372..fbf3f6670b69 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -31,7 +31,6 @@ config MIPS select RTC_LIB if !MACH_LOONGSON64 select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE - select HAVE_DMA_ATTRS select HAVE_DMA_CONTIGUOUS select HAVE_DMA_API_DEBUG select GENERIC_IRQ_PROBE diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index e604f760c4a0..12fa79e2f1b4 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -29,8 +29,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) {} -#include - extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index e8ebf78f6d21..10607f0d2bcd 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -15,7 +15,6 @@ config MN10300 select OLD_SIGACTION select HAVE_DEBUG_STACKOVERFLOW select ARCH_NO_COHERENT_DMA_MMAP - select HAVE_DMA_ATTRS config AM33_2 def_bool n diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index e69b0130335c..1dcd44757f32 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -28,6 +28,4 @@ void dma_cache_sync(void *vaddr, size_t size, mn10300_dcache_flush_inv(); } -#include - #endif diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 4b2504d28178..437555424bda 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -16,7 +16,6 @@ config NIOS2 select SOC_BUS select SPARSE_IRQ select USB_ARCH_HAS_HCD if USB_SUPPORT - select HAVE_DMA_ATTRS config GENERIC_CSUM def_bool y diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 443f44de1020..e118c02cc79a 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -29,9 +29,6 @@ config OPENRISC config MMU def_bool y -config HAVE_DMA_ATTRS - def_bool y - config RWSEM_GENERIC_SPINLOCK def_bool y diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 413bfcf86384..1f260bccb368 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -42,6 +42,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask) return dma_mask == DMA_BIT_MASK(32); } -#include - #endif /* __ASM_OPENRISC_DMA_MAPPING_H */ diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 1489351134fa..14f655cf542e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -30,7 +30,6 @@ config PARISC 
select HAVE_DEBUG_STACKOVERFLOW select HAVE_ARCH_AUDITSYSCALL select ARCH_NO_COHERENT_DMA_MMAP - select HAVE_DMA_ATTRS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 4de518647612..16e024602737 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -83,6 +83,4 @@ struct parisc_device; void * sba_get_iommu(struct parisc_device *dev); #endif -#include - #endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8310be4ffe31..e4824fd04bb7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -108,7 +108,6 @@ config PPC select HAVE_ARCH_TRACEHOOK select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP - select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select HAVE_OPROFILE select HAVE_DEBUG_KMEMLEAK diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 7f522c021dc3..77816acd4fd9 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -125,8 +125,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) #define HAVE_ARCH_DMA_SET_MASK 1 extern int dma_set_mask(struct device *dev, u64 dma_mask); -#include - extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index dbeeb3a049f2..3be9c832dec1 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -579,7 +579,6 @@ config QDIO menuconfig PCI bool "PCI support" - select HAVE_DMA_ATTRS select PCI_MSI select IOMMU_SUPPORT help diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index b3fd54d93dd2..e64bfcb9702f 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, { } -#include - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 6c391a5d3e5c..e13da05505dc 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -11,7 +11,6 @@ config SUPERH select HAVE_GENERIC_DMA_COHERENT select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG - select HAVE_DMA_ATTRS select HAVE_PERF_EVENTS select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_CUSTOM_GPIO_H diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index a3745a3fe029..e11cf0c8206b 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #define DMA_ERROR_CODE 0 -#include - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3203e42190dd..57ffaf285c2f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -26,7 +26,6 @@ config SPARC select RTC_CLASS select RTC_DRV_M48T59 select RTC_SYSTOHC - select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select HAVE_ARCH_JUMP_LABEL if SPARC64 select GENERIC_IRQ_SHOW diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 2777092dd851..1180ae254154 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -37,6 +37,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } 
-#include - #endif diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 6bfbe8b71e7e..de4a4fff9323 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -5,7 +5,6 @@ config TILE def_bool y select HAVE_PERF_EVENTS select USE_PMC if PERF_EVENTS - select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select HAVE_KVM if !TILEGX select GENERIC_FIND_FIRST_BIT diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index c342736e3f1f..01ceb4a895b0 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -73,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) } #define HAVE_ARCH_DMA_SET_MASK 1 - -#include - int dma_set_mask(struct device *dev, u64 mask); /* diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 877342640b6e..e5602ee9c610 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -5,7 +5,6 @@ config UNICORE32 select ARCH_MIGHT_HAVE_PC_SERIO select HAVE_MEMBLOCK select HAVE_GENERIC_DMA_COHERENT - select HAVE_DMA_ATTRS select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select GENERIC_ATOMIC64 diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 8140e053ccd3..4749854afd03 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &swiotlb_dma_map_ops; } -#include - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (dev && dev->dma_mask) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 92b2a73162ee..89159a6fa503 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -100,7 +100,6 @@ config X86 select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW select HAVE_DMA_API_DEBUG - select HAVE_DMA_ATTRS select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 953b7263f844..3a27b93e6261 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #define HAVE_ARCH_DMA_SUPPORTED 1 extern int dma_supported(struct device *hwdev, u64 mask); -#include - extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, struct dma_attrs *attrs); diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 82044f732323..e9df1567d778 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -15,7 +15,6 @@ config XTENSA select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select HAVE_DMA_API_DEBUG - select HAVE_DMA_ATTRS select HAVE_FUNCTION_TRACER select HAVE_FUTEX_CMPXCHG if !MMU select HAVE_IRQ_TIME_ACCOUNTING diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 66c9ba261e30..87b7a7dfbcf3 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -30,8 +30,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &xtensa_dma_map_ops; } -#include - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 59babd5a5396..8ae7ab68cb97 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -82,13 +82,13 @@ config DRM_TTM config DRM_GEM_CMA_HELPER bool - 
depends on DRM && HAVE_DMA_ATTRS + depends on DRM help Choose this if you need the GEM CMA helper functions config DRM_KMS_CMA_HELPER bool - depends on DRM && HAVE_DMA_ATTRS + depends on DRM select DRM_GEM_CMA_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 35ca4f007839..a1844b50546c 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -5,7 +5,7 @@ config DRM_IMX select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER - depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS + depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) depends on IMX_IPUV3_CORE help enable i.MX graphics support diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index d4e0a39568f6..96dcd4a78951 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -1,6 +1,6 @@ config DRM_RCAR_DU tristate "DRM Support for R-Car Display Unit" - depends on DRM && ARM && HAVE_DMA_ATTRS && OF + depends on DRM && ARM && OF depends on ARCH_SHMOBILE || COMPILE_TEST select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index b9202aa6f8ab..8d17d00ddb4b 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -1,6 +1,6 @@ config DRM_SHMOBILE tristate "DRM Support for SH Mobile" - depends on DRM && ARM && HAVE_DMA_ATTRS + depends on DRM && ARM depends on ARCH_SHMOBILE || COMPILE_TEST depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 10c1b1926e6f..5ad43a1bb260 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -1,6 +1,6 @@ config DRM_STI tristate "DRM Support for STMicroelectronics SoC stiH41x Series" - depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS + depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) select RESET_CONTROLLER select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 78beafb0742c..f60a1ec84fa4 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -1,6 +1,6 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" - depends on DRM && OF && ARM && HAVE_DMA_ATTRS + depends on DRM && OF && ARM select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 2d7d115ddf3f..584810474e5b 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -1,7 +1,7 @@ config DRM_VC4 tristate "Broadcom VC4 Graphics" depends on ARCH_BCM2835 || COMPILE_TEST - depends on DRM && HAVE_DMA_ATTRS + depends on DRM select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 0c53805dff0e..526359447ff9 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -216,7 +216,6 @@ config VIDEO_STI_BDISP tristate "STMicroelectronics BDISP 2D blitter driver" depends on VIDEO_DEV && VIDEO_V4L2 depends on ARCH_STI || COMPILE_TEST - depends on HAVE_DMA_ATTRS select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV help diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h deleted file mode 100644 
index 6c32af918c2f..000000000000 --- a/include/asm-generic/dma-mapping-broken.h +++ /dev/null @@ -1,95 +0,0 @@ -#ifndef _ASM_GENERIC_DMA_MAPPING_H -#define _ASM_GENERIC_DMA_MAPPING_H - -/* define the dma api to allow compilation but not linking of - * dma dependent code. Code that depends on the dma-mapping - * API needs to set 'depends on HAS_DMA' in its Kconfig - */ - -struct scatterlist; - -extern void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flag); - -extern void -dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle); - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - /* attrs is not supported and ignored */ - return dma_alloc_coherent(dev, size, dma_handle, flag); -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - /* attrs is not supported and ignored */ - dma_free_coherent(dev, size, cpu_addr, dma_handle); -} - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -extern dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction); - -extern void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction); - -extern int -dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction); - -extern void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction); - -extern dma_addr_t -dma_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction); - -extern void -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction); - -extern void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction); - -extern void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction); - -extern void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction); - -#define dma_sync_single_for_device dma_sync_single_for_cpu -#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu -#define dma_sync_sg_for_device dma_sync_sg_for_cpu - -extern int -dma_mapping_error(struct device *dev, dma_addr_t dma_addr); - -extern int -dma_supported(struct device *dev, u64 mask); - -extern int -dma_set_mask(struct device *dev, u64 mask); - -extern int -dma_get_cache_alignment(void); - -extern void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction); - -#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h deleted file mode 100644 index b1bc954eccf3..000000000000 --- a/include/asm-generic/dma-mapping-common.h +++ /dev/null @@ -1,358 +0,0 @@ -#ifndef _ASM_GENERIC_DMA_MAPPING_H -#define _ASM_GENERIC_DMA_MAPPING_H - -#include <linux/kmemcheck.h> -#include <linux/bug.h> -#include <linux/scatterlist.h> -#include <linux/dma-debug.h> -#include <linux/dma-attrs.h> -#include <asm-generic/dma-coherent.h> - -static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, - size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct
dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; - - kmemcheck_mark_initialized(ptr, size); - BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, attrs); - debug_dma_map_page(dev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, addr, true); - return addr; -} - -static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, - size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, attrs); - debug_dma_unmap_page(dev, addr, size, dir, true); -} - -/* - * dma_maps_sg_attrs returns 0 on error and > 0 on success. - * It should never return a value < 0. - */ -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - int i, ents; - struct scatterlist *s; - - for_each_sg(sg, s, nents, i) - kmemcheck_mark_initialized(sg_virt(s), s->length); - BUG_ON(!valid_dma_direction(dir)); - ents = ops->map_sg(dev, sg, nents, dir, attrs); - BUG_ON(ents < 0); - debug_dma_map_sg(dev, sg, nents, ents, dir); - - return ents; -} - -static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - debug_dma_unmap_sg(dev, sg, nents, dir); - if (ops->unmap_sg) - ops->unmap_sg(dev, sg, nents, dir, attrs); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; - - kmemcheck_mark_initialized(page_address(page) + offset, size); - BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, page, offset, size, dir, NULL); - debug_dma_map_page(dev, page, offset, size, dir, addr, false); - - return addr; -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, NULL); - debug_dma_unmap_page(dev, addr, size, dir, false); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(dev, addr, size, dir); - debug_dma_sync_single_for_cpu(dev, addr, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_device) - ops->sync_single_for_device(dev, addr, size, dir); - debug_dma_sync_single_for_device(dev, addr, size, dir); -} - -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(dev, addr + offset, size, 
dir); - debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_device) - ops->sync_single_for_device(dev, addr + offset, size, dir); - debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_sg_for_cpu) - ops->sync_sg_for_cpu(dev, sg, nelems, dir); - debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_sg_for_device) - ops->sync_sg_for_device(dev, sg, nelems, dir); - debug_dma_sync_sg_for_device(dev, sg, nelems, dir); - -} - -#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) -#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) -#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) -#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) - -extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); - -void *dma_common_contiguous_remap(struct page *page, size_t size, - unsigned long vm_flags, - pgprot_t prot, const void *caller); - -void *dma_common_pages_remap(struct page **pages, size_t size, - unsigned long vm_flags, pgprot_t prot, - const void *caller); -void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); - -/** - * dma_mmap_attrs - map a coherent DMA allocation into user space - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @vma: vm_area_struct describing requested user mapping - * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs - * @handle: device-view address returned from dma_alloc_attrs - * @size: size of memory originally requested in dma_alloc_attrs - * @attrs: attributes of mapping properties requested in dma_alloc_attrs - * - * Map a coherent DMA buffer previously allocated by dma_alloc_attrs - * into user space. The coherent DMA buffer must not be freed by the - * driver until the user space mapping has been released. 
- */ -static inline int -dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - if (ops->mmap) - return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); -} - -#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) - -int -dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size); - -static inline int -dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - if (ops->get_sgtable) - return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, - attrs); - return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); -} - -#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) - -#ifndef arch_dma_alloc_attrs -#define arch_dma_alloc_attrs(dev, flag) (true) -#endif - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - - BUG_ON(!ops); - - if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) - return cpu_addr; - - if (!arch_dma_alloc_attrs(&dev, &flag)) - return NULL; - if (!ops->alloc) - return NULL; - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!ops); - WARN_ON(irqs_disabled()); - - if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) - return; - - if (!ops->free) - return; - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); -} - -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ - return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); -} - -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); -} - -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - - if (get_dma_ops(dev)->mapping_error) - return get_dma_ops(dev)->mapping_error(dev, dma_addr); - -#ifdef DMA_ERROR_CODE - return dma_addr == DMA_ERROR_CODE; -#else - return 0; -#endif -} - -#ifndef HAVE_ARCH_DMA_SUPPORTED -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (!ops) - return 0; - if 
(!ops->dma_supported) - return 1; - return ops->dma_supported(dev, mask); -} -#endif - -#ifndef HAVE_ARCH_DMA_SET_MASK -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (ops->set_dma_mask) - return ops->set_dma_mask(dev, mask); - - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - *dev->dma_mask = mask; - return 0; -} -#endif - -#endif
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h index c8e1831d7572..99c0be00b47c 100644 --- a/include/linux/dma-attrs.h +++ b/include/linux/dma-attrs.h @@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs) bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); } -#ifdef CONFIG_HAVE_DMA_ATTRS /** * dma_set_attr - set a specific attribute * @attr: attribute to set @@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) BUG_ON(attr >= DMA_ATTR_MAX); return test_bit(attr, attrs->flags); } -#else /* !CONFIG_HAVE_DMA_ATTRS */ -static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) -{ -} -static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) -{ - return 0; -} -#endif /* CONFIG_HAVE_DMA_ATTRS */ #endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2e551e2d2d03..cc0517b71c5e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -6,8 +6,12 @@ #include <linux/string.h> #include <linux/device.h> #include <linux/err.h> +#include <linux/dma-debug.h> #include <linux/dma-direction.h> #include <linux/scatterlist.h> +#include <linux/kmemcheck.h> +#include <linux/bug.h> +#include <linux/dma-attrs.h> /* * A dma_addr_t can hold any valid DMA or bus address for the platform. @@ -86,7 +90,363 @@ static inline int is_device_dma_capable(struct device *dev) #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> #else -#include <asm-generic/dma-mapping-broken.h> +/* + * Define the dma api to allow compilation but not linking of + * dma dependent code. Code that depends on the dma-mapping + * API needs to set 'depends on HAS_DMA' in its Kconfig + */ +extern struct dma_map_ops bad_dma_ops; +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + return &bad_dma_ops; +} +#endif + +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(ptr, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, attrs); + debug_dma_map_page(dev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, addr, true); + return addr; +} + +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir, true); +} + +/* + * dma_maps_sg_attrs returns 0 on error and > 0 on success. + * It should never return a value < 0.
+ */ +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + int i, ents; + struct scatterlist *s; + + for_each_sg(sg, s, nents, i) + kmemcheck_mark_initialized(sg_virt(s), s->length); + BUG_ON(!valid_dma_direction(dir)); + ents = ops->map_sg(dev, sg, nents, dir, attrs); + BUG_ON(ents < 0); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} + +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, page, offset, size, dir, NULL); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; +} + +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, NULL); + debug_dma_unmap_page(dev, addr, size, dir, false); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); +} + +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, 
nelems, dir); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); + +} + +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) + +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + +/** + * dma_mmap_attrs - map a coherent DMA allocation into user space + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices + * @vma: vm_area_struct describing requested user mapping + * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs + * @handle: device-view address returned from dma_alloc_attrs + * @size: size of memory originally requested in dma_alloc_attrs + * @attrs: attributes of mapping properties requested in dma_alloc_attrs + * + * Map a coherent DMA buffer previously allocated by dma_alloc_attrs + * into user space. The coherent DMA buffer must not be freed by the + * driver until the user space mapping has been released. 
+ */ +static inline int +dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->mmap) + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) + +int +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +static inline int +dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->get_sgtable) + return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) + +#ifndef arch_dma_alloc_attrs +#define arch_dma_alloc_attrs(dev, flag) (true) +#endif + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!ops); + + if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) + return cpu_addr; + + if (!arch_dma_alloc_attrs(&dev, &flag)) + return NULL; + if (!ops->alloc) + return NULL; + + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + WARN_ON(irqs_disabled()); + + if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) + return; + + if (!ops->free) + return; + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); +} + +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); +} + +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); +} + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + debug_dma_mapping_error(dev, dma_addr); + + if (get_dma_ops(dev)->mapping_error) + return get_dma_ops(dev)->mapping_error(dev, dma_addr); + +#ifdef DMA_ERROR_CODE + return dma_addr == DMA_ERROR_CODE; +#else + return 0; +#endif +} + +#ifndef HAVE_ARCH_DMA_SUPPORTED +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (!ops) + return 0; + if 
(!ops->dma_supported) + return 1; + return ops->dma_supported(dev, mask); +} +#endif + +#ifndef HAVE_ARCH_DMA_SET_MASK +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); + + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + *dev->dma_mask = mask; + return 0; +} #endif static inline u64 dma_get_mask(struct device *dev) @@ -259,22 +619,6 @@ static inline void dmam_release_declared_memory(struct device *dev) } #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ -#ifndef CONFIG_HAVE_DMA_ATTRS -struct dma_attrs; - -#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ - dma_map_single(dev, cpu_addr, size, dir) - -#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ - dma_unmap_single(dev, dma_addr, size, dir) - -#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \ - dma_map_sg(dev, sgl, nents, dir) - -#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ - dma_unmap_sg(dev, sgl, nents, dir) - -#else static inline void *dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { @@ -300,7 +644,6 @@ static inline int dma_mmap_writecombine(struct device *dev, dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); } -#endif /* CONFIG_HAVE_DMA_ATTRS */ #ifdef CONFIG_NEED_DMA_MAP_STATE #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME -- cgit v1.2.3
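For reference, the consolidated API above is easiest to follow from the caller's side. The sketch below is illustrative only, not part of the patch series; the function name, device pointer, and 4 KiB size are hypothetical. It shows both the coherent and the streaming paths a driver now reaches through the shared dma_map_ops wrappers:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int example_dma_usage(struct device *dev)
	{
		size_t size = 4096;
		dma_addr_t handle;
		void *buf;

		/* Coherent path: dma_alloc_coherent() wraps dma_alloc_attrs(),
		 * which tries the per-device pool via dma_alloc_from_coherent()
		 * and then dispatches to ops->alloc. */
		buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* ... program the device with 'handle', run the I/O ... */
		dma_free_coherent(dev, size, buf, handle);

		/* Streaming path: dma_map_single() is the attrs-less macro
		 * form of dma_map_single_attrs() defined above. */
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle)) {
			/* dma_mapping_error() consults ops->mapping_error,
			 * or DMA_ERROR_CODE where an arch defines it. */
			kfree(buf);
			return -EIO;
		}
		/* ... start the transfer, wait for completion ... */
		dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
		kfree(buf);
		return 0;
	}

On a CONFIG_HAS_DMA=n configuration the same calls still compile but resolve against bad_dma_ops and fail at link time, which is the behaviour the deleted dma-mapping-broken.h used to provide with bare extern declarations.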
From 1ecb4ae5f0aaf48b538b9e90b7b360215a2cf9ed Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 11 Feb 2016 16:13:20 -0800 Subject: arch/x86/Kconfig: CONFIG_X86_UV should depend on CONFIG_EFI arch/x86/built-in.o: In function `uv_bios_call': (.text+0xeba00): undefined reference to `efi_call' Reported-by: kbuild test robot Suggested-by: "H. Peter Anvin" Cc: Ingo Molnar Reviewed-by: Matt Fleming Acked-by: Alex Thorlton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/Kconfig')
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9af2e6338400..ab2ed5328f0a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -475,6 +475,7 @@ config X86_UV depends on X86_64 depends on X86_EXTENDED_PLATFORM depends on NUMA + depends on EFI depends on X86_X2APIC depends on PCI ---help--- -- cgit v1.2.3
From 4e7f9df25874cedbbc604a5c5c2e7a6efe662387 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 11 Feb 2016 01:05:01 +0200 Subject: hpet: Drop stale URLs Looks like the HPET spec at intel.com got moved. It isn't hard to find, so drop the link and just mention the revision assumed. Suggested-by: Thomas Gleixner Signed-off-by: Michael S. Tsirkin Acked-by: Greg Kroah-Hartman Cc: Arnd Bergmann Cc: Clemens Ladisch Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Peter Zijlstra Cc: linux-doc@vger.kernel.org Link: http://lkml.kernel.org/r/1455145462-3877-1-git-send-email-mst@redhat.com Signed-off-by: Ingo Molnar --- Documentation/timers/hpet.txt | 4 +--- arch/x86/Kconfig | 4 ++-- drivers/char/hpet.c | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'arch/x86/Kconfig')
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt index 767392ffd31e..a484d2c109d7 100644 --- a/Documentation/timers/hpet.txt +++ b/Documentation/timers/hpet.txt @@ -1,9 +1,7 @@ High Precision Event Timer Driver for Linux The High Precision Event Timer (HPET) hardware follows a specification -by Intel and Microsoft which can be found at - - http://www.intel.com/hardwaredesign/hpetspec_1.pdf +by Intel and Microsoft, revision 1. Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision") and up to 32 comparators. Normally three or more comparators are provided,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ab2ed5328f0a..c46662f64c39 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -778,8 +778,8 @@ config HPET_TIMER HPET is the next generation timer replacing legacy 8254s. The HPET provides a stable time base on SMP systems, unlike the TSC, but it is more expensive to access, - as it is off-chip. You can find the HPET spec at - <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>. + as it is off-chip. The interface used is documented + in the HPET spec, revision 1. You can safely choose Y here. However, HPET will only be activated if the platform and the BIOS support this feature.
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 240b6cf1d97c..be54e5331a45 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -42,7 +42,7 @@ /* * The High Precision Event Timer driver. * This driver is closely modelled after the rtc.c driver. - * http://www.intel.com/hardwaredesign/hpetspec_1.pdf + * See HPET spec revision 1. */ #define HPET_USER_FREQ (64) #define HPET_DRIFT (500) -- cgit v1.2.3
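The /dev/hpet interface referenced above is unchanged by this patch; only the stale URLs go away. As a point of reference, here is a minimal userspace sketch (hypothetical, assuming a kernel built with the HPET character driver and an existing /dev/hpet node, using only the ioctls declared in <linux/hpet.h> and described in Documentation/timers/hpet.txt):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hpet.h>

	int main(void)
	{
		struct hpet_info info;
		int fd = open("/dev/hpet", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/hpet");
			return 1;
		}
		/* HPET_INFO reports which HPET block and comparator this
		 * file descriptor is bound to, plus capability flags. */
		if (ioctl(fd, HPET_INFO, &info) < 0) {
			perror("ioctl(HPET_INFO)");
			close(fd);
			return 1;
		}
		printf("hpet %hu, timer %hu, flags %#lx, periodic-capable: %s\n",
		       info.hi_hpet, info.hi_timer, info.hi_flags,
		       (info.hi_flags & HPET_INFO_PERIODIC) ? "yes" : "no");
		close(fd);
		return 0;
	}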