249 files changed, 3367 insertions(+), 2358 deletions(-)
diff --git a/Documentation/arm/keystone/Overview.txt b/Documentation/arm/keystone/Overview.txt index f17bc4c9dff9..400c0c270d2e 100644 --- a/Documentation/arm/keystone/Overview.txt +++ b/Documentation/arm/keystone/Overview.txt @@ -49,24 +49,6 @@ specified through DTS. Following are the DTS used:- The device tree documentation for the keystone machines are located at Documentation/devicetree/bindings/arm/keystone/keystone.txt -Known issues & workaround -------------------------- - -Some of the device drivers used on keystone are re-used from that from -DaVinci and other TI SoCs. These device drivers may use clock APIs directly. -Some of the keystone specific drivers such as netcp uses run time power -management API instead to enable clock. As this API has limitations on -keystone, following workaround is needed to boot Linux. - - Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise - clock frameworks will try to disable clocks that are unused and disable - the hardware. This is because netcp related power domain and clock - domains are enabled in u-boot as run time power management API currently - doesn't enable clocks for netcp due to a limitation. This workaround is - expected to be removed in the future when proper API support becomes - available. Until then, this work around is needed. - - Document Author --------------- Murali Karicheri <m-karicheri2@ti.com> diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index 2f6c6ff7161d..d8880ca30af4 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt @@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0 parameter. 1: The multi-queue block layer is instantiated with a hardware dispatch queue for each CPU node in the system. + +use_lightnvm=[0/1]: Default: 0 + Register device with LightNVM. Requires blk-mq to be used. diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt index b38200d2583a..0dfa60d88dd3 100644 --- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt @@ -1,7 +1,9 @@ * Temperature Sensor ADC (TSADC) on rockchip SoCs Required properties: -- compatible : "rockchip,rk3288-tsadc" +- compatible : should be "rockchip,<name>-tsadc" + "rockchip,rk3288-tsadc": found on RK3288 SoCs + "rockchip,rk3368-tsadc": found on RK3368 SoCs - reg : physical base address of the controller and length of memory mapped region. - interrupts : The interrupt number to the cpu. 
The interrupt specifier format diff --git a/MAINTAINERS b/MAINTAINERS index 050d0e77a2cf..58e290566ae2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -318,7 +318,7 @@ M: Zhang Rui <rui.zhang@intel.com> L: linux-acpi@vger.kernel.org W: https://01.org/linux-acpi S: Supported -F: drivers/acpi/video.c +F: drivers/acpi/acpi_video.c ACPI WMI DRIVER L: platform-driver-x86@vger.kernel.org @@ -1931,7 +1931,7 @@ S: Supported F: drivers/i2c/busses/i2c-at91.c ATMEL ISI DRIVER -M: Josh Wu <josh.wu@atmel.com> +M: Ludovic Desroches <ludovic.desroches@atmel.com> L: linux-media@vger.kernel.org S: Supported F: drivers/media/platform/soc_camera/atmel-isi.c @@ -1950,7 +1950,8 @@ S: Supported F: drivers/net/ethernet/cadence/ ATMEL NAND DRIVER -M: Josh Wu <josh.wu@atmel.com> +M: Wenyou Yang <wenyou.yang@atmel.com> +M: Josh Wu <rainyfeeling@outlook.com> L: linux-mtd@lists.infradead.org S: Supported F: drivers/mtd/nand/atmel_nand* @@ -6366,6 +6367,7 @@ F: arch/*/include/asm/pmem.h LIGHTNVM PLATFORM SUPPORT M: Matias Bjorling <mb@lightnvm.io> W: http://github/OpenChannelSSD +L: linux-block@vger.kernel.org S: Maintained F: drivers/lightnvm/ F: include/linux/lightnvm.h @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Blurry Fish Butt # *DOCUMENTATION* diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index c92c0ef1e9d2..f1ac9818b751 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index cfac24e0e7b6..323486d6ee83 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 9922a118a15a..66191cd0447e 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index f761a7c70761..f68838e8068a 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index dc6f74f41283..96bd1c20fb0b 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 3fef0a210c56..fcae66683ca0 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 
+CONFIG_CROSS_COMPILE="arc-linux-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 51784837daae..b01b659168ea 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index ef35ef3923dd..a07f20de221b 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 634509e5e572..f36c047b33ca 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +CONFIG_CROSS_COMPILE="arc-linux-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h index ad481c24070d..258b0e5ad332 100644 --- a/arch/arc/include/asm/irqflags-arcv2.h +++ b/arch/arc/include/asm/irqflags-arcv2.h @@ -37,6 +37,9 @@ #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ (ARCV2_IRQ_DEF_PRIO << 1)) +/* SLEEP needs default irq priority (<=) which can interrupt the doze */ +#define ISA_SLEEP_ARG (0x10 | ARCV2_IRQ_DEF_PRIO) + #ifndef __ASSEMBLY__ /* diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h index d8c608174617..c1d36458bfb7 100644 --- a/arch/arc/include/asm/irqflags-compact.h +++ b/arch/arc/include/asm/irqflags-compact.h @@ -43,6 +43,8 @@ #define ISA_INIT_STATUS_BITS STATUS_IE_MASK +#define ISA_SLEEP_ARG 0x3 + #ifndef __ASSEMBLY__ /****************************************************************** diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index c14a5bea0c76..5d446df2c413 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c @@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) "st sp, [r24] \n\t" #endif - "sync \n\t" - /* * setup _current_task with incoming tsk. 
* optionally, set r25 to that as well diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S index e248594097e7..e6890b1f8650 100644 --- a/arch/arc/kernel/ctx_sw_asm.S +++ b/arch/arc/kernel/ctx_sw_asm.S @@ -44,9 +44,6 @@ __switch_to: * don't need to do anything special to return it */ - /* hardware memory barrier */ - sync - /* * switch to new task, contained in r1 * Temp reg r3 is required to get the ptr to store val diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 91d5a0f1f3f7..a3f750e76b68 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls) void arch_cpu_idle(void) { /* sleep, but enable all interrupts before committing */ - if (is_isa_arcompact()) { - __asm__("sleep 0x3"); - } else { - __asm__("sleep 0x10"); - } + __asm__ __volatile__( + "sleep %0 \n" + : + :"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */ } asmlinkage void ret_from_fork(void); diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 93c6ea52b671..7352475451f6 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame) (const u8 *)(fde + 1) + *fde, ptrType); - if (pc >= endLoc) + if (pc >= endLoc) { fde = NULL; - } else - fde = NULL; - } - if (fde == NULL) { - for (fde = table->address, tableSize = table->size; - cie = NULL, tableSize > sizeof(*fde) - && tableSize - sizeof(*fde) >= *fde; - tableSize -= sizeof(*fde) + *fde, - fde += 1 + *fde / sizeof(*fde)) { - cie = cie_for_fde(fde, table); - if (cie == &bad_cie) { cie = NULL; - break; } - if (cie == NULL - || cie == ¬_fde - || (ptrType = fde_pointer_type(cie)) < 0) - continue; - ptr = (const u8 *)(fde + 2); - startLoc = read_pointer(&ptr, - (const u8 *)(fde + 1) + - *fde, ptrType); - if (!startLoc) - continue; - if (!(ptrType & DW_EH_PE_indirect)) - ptrType &= - DW_EH_PE_FORM | DW_EH_PE_signed; - endLoc = - startLoc + read_pointer(&ptr, - (const u8 *)(fde + - 1) + - *fde, ptrType); - if (pc >= startLoc && pc < endLoc) - break; + } else { + fde = NULL; + cie = NULL; } } } diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 0ee739846847..daf2bf52b984 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); if (dirty) { - /* wback + inv dcache lines */ + /* wback + inv dcache lines (K-mapping) */ __flush_dcache_page(paddr, paddr); - /* invalidate any existing icache lines */ + /* invalidate any existing icache lines (U-mapping) */ if (vma->vm_flags & VM_EXEC) __inv_icache_page(paddr, vaddr); } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0365cbbc9179..34e1569a11ee 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -76,6 +76,8 @@ config ARM select IRQ_FORCED_THREADING select MODULES_USE_ELF_REL select NO_BOOTMEM + select OF_EARLY_FLATTREE if OF + select OF_RESERVED_MEM if OF select OLD_SIGACTION select OLD_SIGSUSPEND3 select PERF_USE_VMALLOC @@ -1822,8 +1824,6 @@ config USE_OF bool "Flattened Device Tree support" select IRQ_DOMAIN select OF - select OF_EARLY_FLATTREE - select OF_RESERVED_MEM help Include support for flattened device tree machine descriptions. 
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index d9ba6b879fc1..00352e761b8c 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -604,6 +604,7 @@ reg = <0x6f>; interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>, <&dra7_pmx_core 0x424>; + interrupt-names = "irq", "wakeup"; pinctrl-names = "default"; pinctrl-0 = <&mcp79410_pins_default>; diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts index 4e0ad3b82796..0962f2fa3f6e 100644 --- a/arch/arm/boot/dts/animeo_ip.dts +++ b/arch/arm/boot/dts/animeo_ip.dts @@ -155,21 +155,21 @@ label = "keyswitch_in"; gpios = <&pioB 1 GPIO_ACTIVE_HIGH>; linux,code = <28>; - gpio-key,wakeup; + wakeup-source; }; error_in { label = "error_in"; gpios = <&pioB 2 GPIO_ACTIVE_HIGH>; linux,code = <29>; - gpio-key,wakeup; + wakeup-source; }; btn { label = "btn"; gpios = <&pioC 23 GPIO_ACTIVE_HIGH>; linux,code = <31>; - gpio-key,wakeup; + wakeup-source; }; }; }; diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts index f89598af4c2b..6bf873e7d96c 100644 --- a/arch/arm/boot/dts/at91-foxg20.dts +++ b/arch/arm/boot/dts/at91-foxg20.dts @@ -159,7 +159,7 @@ label = "Button"; gpios = <&pioC 4 GPIO_ACTIVE_LOW>; linux,code = <0x103>; - gpio-key,wakeup; + wakeup-source; }; }; }; diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts index bf18ece0c027..229e989eb60d 100644 --- a/arch/arm/boot/dts/at91-kizbox.dts +++ b/arch/arm/boot/dts/at91-kizbox.dts @@ -24,15 +24,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <18432000>; - }; - main_xtal { clock-frequency = <18432000>; }; @@ -94,14 +85,14 @@ label = "PB_RST"; gpios = <&pioB 30 GPIO_ACTIVE_HIGH>; linux,code = <0x100>; - gpio-key,wakeup; + wakeup-source; }; user { label = "PB_USER"; gpios = <&pioB 31 GPIO_ACTIVE_HIGH>; linux,code = <0x101>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts index f0b1563cb3f1..50a14568f094 100644 --- a/arch/arm/boot/dts/at91-kizbox2.dts +++ b/arch/arm/boot/dts/at91-kizbox2.dts @@ -171,21 +171,21 @@ label = "PB_PROG"; gpios = <&pioE 27 GPIO_ACTIVE_LOW>; linux,code = <0x102>; - gpio-key,wakeup; + wakeup-source; }; reset { label = "PB_RST"; gpios = <&pioE 29 GPIO_ACTIVE_LOW>; linux,code = <0x100>; - gpio-key,wakeup; + wakeup-source; }; user { label = "PB_USER"; gpios = <&pioE 31 GPIO_ACTIVE_HIGH>; linux,code = <0x101>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts index 9f72b4932634..9682d105d4d8 100644 --- a/arch/arm/boot/dts/at91-kizboxmini.dts +++ b/arch/arm/boot/dts/at91-kizboxmini.dts @@ -98,14 +98,14 @@ label = "PB_PROG"; gpios = <&pioC 17 GPIO_ACTIVE_LOW>; linux,code = <0x102>; - gpio-key,wakeup; + wakeup-source; }; reset { label = "PB_RST"; gpios = <&pioC 16 GPIO_ACTIVE_LOW>; linux,code = <0x100>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts index a9aef53ab764..4f2eebf4a560 100644 --- a/arch/arm/boot/dts/at91-qil_a9260.dts +++ b/arch/arm/boot/dts/at91-qil_a9260.dts @@ -183,7 +183,7 @@ label = "user_pb"; gpios = <&pioB 10 GPIO_ACTIVE_LOW>; linux,code = <28>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git 
a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts index e07c2b206beb..ad6de73ed5a5 100644 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts @@ -45,6 +45,7 @@ /dts-v1/; #include "sama5d2.dtsi" #include "sama5d2-pinfunc.h" +#include <dt-bindings/mfd/atmel-flexcom.h> / { model = "Atmel SAMA5D2 Xplained"; @@ -59,15 +60,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <12000000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -91,6 +83,22 @@ status = "okay"; }; + sdmmc0: sdio-host@a0000000 { + bus-width = <8>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sdmmc0_default>; + non-removable; + mmc-ddr-1_8v; + status = "okay"; + }; + + sdmmc1: sdio-host@b0000000 { + bus-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sdmmc1_default>; + status = "okay"; /* conflict with qspi0 */ + }; + apb { spi0: spi@f8000000 { pinctrl-names = "default"; @@ -181,12 +189,49 @@ }; }; + flx0: flexcom@f8034000 { + atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>; + status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */ + + uart5: serial@200 { + compatible = "atmel,at91sam9260-usart"; + reg = <0x200 0x200>; + interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&flx0_clk>; + clock-names = "usart"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flx0_default>; + atmel,fifo-size = <32>; + status = "okay"; + }; + }; + uart3: serial@fc008000 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart3_default>; status = "okay"; }; + flx4: flexcom@fc018000 { + atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>; + status = "okay"; + + i2c2: i2c@600 { + compatible = "atmel,sama5d2-i2c"; + reg = <0x600 0x200>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>; + dmas = <0>, <0>; + dma-names = "tx", "rx"; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&flx4_clk>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flx4_default>; + atmel,fifo-size = <16>; + status = "okay"; + }; + }; + i2c1: i2c@fc028000 { dmas = <0>, <0>; pinctrl-names = "default"; @@ -201,6 +246,18 @@ }; pinctrl@fc038000 { + pinctrl_flx0_default: flx0_default { + pinmux = <PIN_PB28__FLEXCOM0_IO0>, + <PIN_PB29__FLEXCOM0_IO1>; + bias-disable; + }; + + pinctrl_flx4_default: flx4_default { + pinmux = <PIN_PD12__FLEXCOM4_IO0>, + <PIN_PD13__FLEXCOM4_IO1>; + bias-disable; + }; + pinctrl_i2c0_default: i2c0_default { pinmux = <PIN_PD21__TWD0>, <PIN_PD22__TWCK0>; @@ -227,6 +284,46 @@ bias-disable; }; + pinctrl_sdmmc0_default: sdmmc0_default { + cmd_data { + pinmux = <PIN_PA1__SDMMC0_CMD>, + <PIN_PA2__SDMMC0_DAT0>, + <PIN_PA3__SDMMC0_DAT1>, + <PIN_PA4__SDMMC0_DAT2>, + <PIN_PA5__SDMMC0_DAT3>, + <PIN_PA6__SDMMC0_DAT4>, + <PIN_PA7__SDMMC0_DAT5>, + <PIN_PA8__SDMMC0_DAT6>, + <PIN_PA9__SDMMC0_DAT7>; + bias-pull-up; + }; + + ck_cd_rstn_vddsel { + pinmux = <PIN_PA0__SDMMC0_CK>, + <PIN_PA10__SDMMC0_RSTN>, + <PIN_PA11__SDMMC0_VDDSEL>, + <PIN_PA13__SDMMC0_CD>; + bias-disable; + }; + }; + + pinctrl_sdmmc1_default: sdmmc1_default { + cmd_data { + pinmux = <PIN_PA28__SDMMC1_CMD>, + <PIN_PA18__SDMMC1_DAT0>, + <PIN_PA19__SDMMC1_DAT1>, + <PIN_PA20__SDMMC1_DAT2>, + <PIN_PA21__SDMMC1_DAT3>; + bias-pull-up; + }; + + conf-ck_cd { + pinmux = <PIN_PA22__SDMMC1_CK>, + <PIN_PA30__SDMMC1_CD>; + bias-disable; + }; + }; + pinctrl_spi0_default: spi0_default { pinmux = <PIN_PA14__SPI0_SPCK>, <PIN_PA15__SPI0_MOSI>, diff --git 
a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index 8488ac53d22d..ff888d21c786 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -315,7 +315,7 @@ label = "PB_USER"; gpios = <&pioE 29 GPIO_ACTIVE_LOW>; linux,code = <0x104>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index 45371a1b61b3..131614f28e75 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts @@ -50,7 +50,6 @@ compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5"; chosen { - bootargs = "ignore_loglevel earlyprintk"; stdout-path = "serial0:115200n8"; }; @@ -59,15 +58,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <12000000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -235,7 +225,7 @@ label = "pb_user1"; gpios = <&pioE 8 GPIO_ACTIVE_HIGH>; linux,code = <0x100>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts index 6d272c0125e3..2d4a33100af6 100644 --- a/arch/arm/boot/dts/at91-sama5d4ek.dts +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts @@ -50,7 +50,6 @@ compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5"; chosen { - bootargs = "ignore_loglevel earlyprintk"; stdout-path = "serial0:115200n8"; }; @@ -59,15 +58,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <12000000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -304,7 +294,7 @@ label = "pb_user1"; gpios = <&pioE 13 GPIO_ACTIVE_HIGH>; linux,code = <0x100>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts index 8dab4b75ca97..f90e1c2d3caa 100644 --- a/arch/arm/boot/dts/at91rm9200ek.dts +++ b/arch/arm/boot/dts/at91rm9200ek.dts @@ -21,15 +21,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <18432000>; - }; - slow_xtal { clock-frequency = <32768>; }; diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts index 2e92ac020f23..55bd51f07fa6 100644 --- a/arch/arm/boot/dts/at91sam9261ek.dts +++ b/arch/arm/boot/dts/at91sam9261ek.dts @@ -22,15 +22,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <18432000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -149,7 +140,7 @@ ti,debounce-tol = /bits/ 16 <65535>; ti,debounce-max = /bits/ 16 <1>; - linux,wakeup; + wakeup-source; }; }; @@ -193,28 +184,28 @@ label = "button_0"; gpios = <&pioA 27 GPIO_ACTIVE_LOW>; linux,code = <256>; - gpio-key,wakeup; + wakeup-source; }; button_1 { label = "button_1"; gpios = <&pioA 26 GPIO_ACTIVE_LOW>; linux,code = <257>; - gpio-key,wakeup; + wakeup-source; }; button_2 { label = "button_2"; gpios = <&pioA 25 GPIO_ACTIVE_LOW>; linux,code = <258>; - gpio-key,wakeup; + wakeup-source; }; button_3 { label = "button_3"; gpios = <&pioA 24 GPIO_ACTIVE_LOW>; linux,code = <259>; - gpio-key,wakeup; + wakeup-source; }; }; }; diff --git a/arch/arm/boot/dts/at91sam9263ek.dts 
b/arch/arm/boot/dts/at91sam9263ek.dts index 23381276ffb8..59df9d73d276 100644 --- a/arch/arm/boot/dts/at91sam9263ek.dts +++ b/arch/arm/boot/dts/at91sam9263ek.dts @@ -22,15 +22,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <16367660>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -213,14 +204,14 @@ label = "left_click"; gpios = <&pioC 5 GPIO_ACTIVE_LOW>; linux,code = <272>; - gpio-key,wakeup; + wakeup-source; }; right_click { label = "right_click"; gpios = <&pioC 4 GPIO_ACTIVE_LOW>; linux,code = <273>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index 57548a2c5a1e..e9cc99b6353a 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -19,15 +19,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <18432000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -206,14 +197,14 @@ label = "Button 3"; gpios = <&pioA 30 GPIO_ACTIVE_LOW>; linux,code = <0x103>; - gpio-key,wakeup; + wakeup-source; }; btn4 { label = "Button 4"; gpios = <&pioA 31 GPIO_ACTIVE_LOW>; linux,code = <0x104>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index 9d16ef8453c5..2400c99134f7 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts @@ -24,15 +24,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <12000000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -323,14 +314,14 @@ label = "left_click"; gpios = <&pioB 6 GPIO_ACTIVE_LOW>; linux,code = <272>; - gpio-key,wakeup; + wakeup-source; }; right_click { label = "right_click"; gpios = <&pioB 7 GPIO_ACTIVE_LOW>; linux,code = <273>; - gpio-key,wakeup; + wakeup-source; }; left { diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index acf3451a332d..ca4ddf86817a 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -23,15 +23,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <16000000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -219,7 +210,7 @@ label = "Enter"; gpios = <&pioB 3 GPIO_ACTIVE_LOW>; linux,code = <28>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts index 558c9f220bed..f10566f759cd 100644 --- a/arch/arm/boot/dts/at91sam9rlek.dts +++ b/arch/arm/boot/dts/at91sam9rlek.dts @@ -22,15 +22,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <12000000>; - }; - slow_xtal { clock-frequency = <32768>; }; @@ -225,14 +216,14 @@ label = "right_click"; gpios = <&pioB 0 GPIO_ACTIVE_LOW>; linux,code = <273>; - gpio-key,wakeup; + wakeup-source; }; left_click { label = "left_click"; gpios = <&pioB 1 GPIO_ACTIVE_LOW>; linux,code = <272>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi index 
26112ebd15fc..b098ad8cd93a 100644 --- a/arch/arm/boot/dts/at91sam9x5cm.dtsi +++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi @@ -13,17 +13,6 @@ }; clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - main_clock: clock@0 { - compatible = "atmel,osc", "fixed-clock"; - clock-frequency = <12000000>; - }; - }; - - clocks { slow_xtal { clock-frequency = <32768>; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index bc672fb91466..fe99231cbde5 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1459,8 +1459,8 @@ interrupt-names = "tx", "rx"; dmas = <&sdma_xbar 133>, <&sdma_xbar 132>; dma-names = "tx", "rx"; - clocks = <&mcasp3_ahclkx_mux>; - clock-names = "fck"; + clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>; + clock-names = "fck", "ahclkx"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/k2l-netcp.dtsi index 01aef230773d..5acbd0dcc2ab 100644 --- a/arch/arm/boot/dts/k2l-netcp.dtsi +++ b/arch/arm/boot/dts/k2l-netcp.dtsi @@ -137,7 +137,7 @@ netcp: netcp@26000000 { /* NetCP address range */ ranges = <0 0x26000000 0x1000000>; - clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>; + clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>; dma-coherent; ti,navigator-dmas = <&dma_gbe 0>, diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi index c56ab6bbfe3c..0e46560551f4 100644 --- a/arch/arm/boot/dts/kirkwood-ts219.dtsi +++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi @@ -40,7 +40,7 @@ }; poweroff@12100 { compatible = "qnap,power-off"; - reg = <0x12000 0x100>; + reg = <0x12100 0x100>; clocks = <&gate_clk 7>; }; spi@10600 { diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts index 8fd8ef2c72da..85f0373df498 100644 --- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts +++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts @@ -86,6 +86,10 @@ }; }; +&emmc { + /delete-property/mmc-hs200-1_8v; +}; + &gpio_keys { pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 6a79c9c526b8..04ea209f1737 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -452,8 +452,10 @@ clock-names = "tsadc", "apb_pclk"; resets = <&cru SRST_TSADC>; reset-names = "tsadc-apb"; - pinctrl-names = "default"; - pinctrl-0 = <&otp_out>; + pinctrl-names = "init", "default", "sleep"; + pinctrl-0 = <&otp_gpio>; + pinctrl-1 = <&otp_out>; + pinctrl-2 = <&otp_gpio>; #thermal-sensor-cells = <1>; rockchip,hw-tshut-temp = <95000>; status = "disabled"; @@ -1395,6 +1397,10 @@ }; tsadc { + otp_gpio: otp-gpio { + rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>; + }; + otp_out: otp-out { rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>; }; diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts index d9a9aca1ccfd..e812f5c1bf70 100644 --- a/arch/arm/boot/dts/sama5d35ek.dts +++ b/arch/arm/boot/dts/sama5d35ek.dts @@ -49,7 +49,7 @@ label = "pb_user1"; gpios = <&pioE 27 GPIO_ACTIVE_HIGH>; linux,code = <0x100>; - gpio-key,wakeup; + wakeup-source; }; }; }; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 15bbaf690047..2193637b9cd2 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -1300,7 +1300,7 @@ }; watchdog@fc068640 { - compatible = "atmel,at91sam9260-wdt"; + compatible = "atmel,sama5d4-wdt"; reg = <0xfc068640 0x10>; clocks = <&clk32k>; status = "disabled"; diff 
--git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi index 12edafefd44a..9beea8976584 100644 --- a/arch/arm/boot/dts/usb_a9260_common.dtsi +++ b/arch/arm/boot/dts/usb_a9260_common.dtsi @@ -115,7 +115,7 @@ label = "user_pb"; gpios = <&pioB 10 GPIO_ACTIVE_LOW>; linux,code = <28>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts index 68c0de36c339..8cc6edb29694 100644 --- a/arch/arm/boot/dts/usb_a9263.dts +++ b/arch/arm/boot/dts/usb_a9263.dts @@ -143,7 +143,7 @@ label = "user_pb"; gpios = <&pioB 10 GPIO_ACTIVE_LOW>; linux,code = <28>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index 6736bae43a5b..0d5acc2cdc8e 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi @@ -158,7 +158,7 @@ interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks VF610_CLK_DSPI0>; clock-names = "dspi"; - spi-num-chipselects = <5>; + spi-num-chipselects = <6>; status = "disabled"; }; @@ -170,7 +170,7 @@ interrupts = <68 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks VF610_CLK_DSPI1>; clock-names = "dspi"; - spi-num-chipselects = <5>; + spi-num-chipselects = <4>; status = "disabled"; }; @@ -461,6 +461,8 @@ clock-names = "adc"; #io-channel-cells = <1>; status = "disabled"; + fsl,adck-max-frequency = <30000000>, <40000000>, + <20000000>; }; esdhc0: esdhc@400b1000 { @@ -472,8 +474,6 @@ <&clks VF610_CLK_ESDHC0>; clock-names = "ipg", "ahb", "per"; status = "disabled"; - fsl,adck-max-frequency = <30000000>, <40000000>, - <20000000>; }; esdhc1: esdhc@400b2000 { diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index 1b1e5acd76e2..e4b1be66b3f5 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_AT91SAM9X_WATCHDOG=y -CONFIG_SSB=m CONFIG_MFD_ATMEL_HLCDC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index a0c57ac88b27..63f7e6ce649a 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y # CONFIG_HWMON is not set -CONFIG_SSB=m CONFIG_MFD_ATMEL_FLEXCOM=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index be1d07d59ee9..1bd9510de1b9 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool); #define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) #endif +static inline int nr_legacy_irqs(void) +{ + return NR_IRQS_LEGACY; +} + #endif #endif diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 7a2a32a1d5a8..ede692ffa32e 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -416,6 +416,7 @@ #define __NR_execveat (__NR_SYSCALL_BASE+387) #define __NR_userfaultfd (__NR_SYSCALL_BASE+388) #define __NR_membarrier (__NR_SYSCALL_BASE+389) +#define __NR_mlock2 (__NR_SYSCALL_BASE+390) /* * The following SWIs are ARM private. 
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 6551d28c27e6..066f7f9ba411 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -17,11 +17,6 @@ #include <asm/mach/pci.h> static int debug_pci; -static resource_size_t (*align_resource)(struct pci_dev *dev, - const struct resource *res, - resource_size_t start, - resource_size_t size, - resource_size_t align) = NULL; /* * We can't use pci_get_device() here since we are @@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, sys->busnr = busnr; sys->swizzle = hw->swizzle; sys->map_irq = hw->map_irq; - align_resource = hw->align_resource; INIT_LIST_HEAD(&sys->resources); if (hw->private_data) @@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, ret = hw->setup(nr, sys); if (ret > 0) { + struct pci_host_bridge *host_bridge; + ret = pcibios_init_resources(nr, sys); if (ret) { kfree(sys); @@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, busnr = sys->bus->busn_res.end + 1; list_add(&sys->node, head); + + host_bridge = pci_find_host_bridge(sys->bus); + host_bridge->align_resource = hw->align_resource; } else { kfree(sys); if (ret < 0) @@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, { struct pci_dev *dev = data; resource_size_t start = res->start; + struct pci_host_bridge *host_bridge; if (res->flags & IORESOURCE_IO && start & 0x300) start = (start + 0x3ff) & ~0x3ff; start = (start + align - 1) & ~(align - 1); - if (align_resource) - return align_resource(dev, res, start, size, align); + host_bridge = pci_find_host_bridge(dev->bus); + + if (host_bridge->align_resource) + return host_bridge->align_resource(dev, res, + start, size, align); return start; } diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index fde6c88d560c..ac368bb068d1 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -399,6 +399,7 @@ CALL(sys_execveat) CALL(sys_userfaultfd) CALL(sys_membarrier) + CALL(sys_mlock2) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index eab83b2435b8..e06fd299de08 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) vcpu_sleep(vcpu); /* - * Disarming the background timer must be done in a - * preemptible context, as this call may sleep. - */ - kvm_timer_flush_hwstate(vcpu); - - /* * Preparing the interrupts to be injected also * involves poking the GIC, which must be done in a * non-preemptible context. */ preempt_disable(); + kvm_timer_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu); local_irq_disable(); diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 6984342da13d..7dace909d5cf 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud) __kvm_flush_dcache_pud(pud); } +static bool kvm_is_device_pfn(unsigned long pfn) +{ + return !pfn_valid(pfn); +} + /** * stage2_dissolve_pmd() - clear and flush huge PMD entry * @kvm: pointer to kvm structure. 
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, kvm_tlb_flush_vmid_ipa(kvm, addr); /* No need to invalidate the cache for device mappings */ - if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) + if (!kvm_is_device_pfn(__phys_to_pfn(addr))) kvm_flush_dcache_pte(old_pte); put_page(virt_to_page(pte)); @@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, pte = pte_offset_kernel(pmd, addr); do { - if (!pte_none(*pte) && - (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) + if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr))) kvm_flush_dcache_pte(*pte); } while (pte++, addr += PAGE_SIZE, addr != end); } @@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) return kvm_vcpu_dabt_iswrite(vcpu); } -static bool kvm_is_device_pfn(unsigned long pfn) -{ - return !pfn_valid(pfn); -} - /** * stage2_wp_ptes - write protect PMD range * @pmd: pointer to pmd entry diff --git a/arch/arm/mach-dove/include/mach/entry-macro.S b/arch/arm/mach-dove/include/mach/entry-macro.S index 72d622baaad3..df1d44bdc375 100644 --- a/arch/arm/mach-dove/include/mach/entry-macro.S +++ b/arch/arm/mach-dove/include/mach/entry-macro.S @@ -18,13 +18,13 @@ @ check low interrupts ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] - mov \irqnr, #31 + mov \irqnr, #32 ands \irqstat, \irqstat, \tmp @ if no low interrupts set, check high interrupts ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF] - moveq \irqnr, #63 + moveq \irqnr, #64 andeqs \irqstat, \irqstat, \tmp @ find first active interrupt source diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 8e7976a4c3e7..cfc696b972f3 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = { .irq_unmask = imx_gpc_irq_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_wake = imx_gpc_irq_set_wake, + .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 5305ec7341ec..79e1f876d1c9 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Ensure that CPU power state is set to ON to avoid CPU * powerdomain transition on wfi */ - clkdm_wakeup(cpu1_clkdm); - omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON); - clkdm_allow_idle(cpu1_clkdm); + clkdm_wakeup_nolock(cpu1_clkdm); + pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON); + clkdm_allow_idle_nolock(cpu1_clkdm); if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) { while (gic_dist_disabled()) { diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index cc8a987149e2..48495ad82aba 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh) return ret; } +static void _enable_optional_clocks(struct omap_hwmod *oh) +{ + struct omap_hwmod_opt_clk *oc; + int i; + + pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); + + for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) + if (oc->_clk) { + pr_debug("omap_hwmod: enable %s:%s\n", oc->role, + __clk_get_name(oc->_clk)); + clk_enable(oc->_clk); + } +} + +static void _disable_optional_clocks(struct omap_hwmod *oh) +{ + struct omap_hwmod_opt_clk 
*oc; + int i; + + pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); + + for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) + if (oc->_clk) { + pr_debug("omap_hwmod: disable %s:%s\n", oc->role, + __clk_get_name(oc->_clk)); + clk_disable(oc->_clk); + } +} + /** * _enable_clocks - enable hwmod main clock and interface clocks * @oh: struct omap_hwmod * @@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh) clk_enable(os->_clk); } + if (oh->flags & HWMOD_OPT_CLKS_NEEDED) + _enable_optional_clocks(oh); + /* The opt clocks are controlled by the device driver. */ return 0; @@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh) clk_disable(os->_clk); } + if (oh->flags & HWMOD_OPT_CLKS_NEEDED) + _disable_optional_clocks(oh); + /* The opt clocks are controlled by the device driver. */ return 0; } -static void _enable_optional_clocks(struct omap_hwmod *oh) -{ - struct omap_hwmod_opt_clk *oc; - int i; - - pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); - - for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) - if (oc->_clk) { - pr_debug("omap_hwmod: enable %s:%s\n", oc->role, - __clk_get_name(oc->_clk)); - clk_enable(oc->_clk); - } -} - -static void _disable_optional_clocks(struct omap_hwmod *oh) -{ - struct omap_hwmod_opt_clk *oc; - int i; - - pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); - - for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) - if (oc->_clk) { - pr_debug("omap_hwmod: disable %s:%s\n", oc->role, - __clk_get_name(oc->_clk)); - clk_disable(oc->_clk); - } -} - /** * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 * @oh: struct omap_hwmod * diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index ca6df1a73475..76bce11c85a4 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm { * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up * events by calling _reconfigure_io_chain() when a device is enabled * or idled. + * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to + * operate and they need to be handled at the same time as the main_clk. 
*/ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_FORCE_MSTANDBY (1 << 11) #define HWMOD_SWSUP_SIDLE_ACT (1 << 12) #define HWMOD_RECONFIG_IO_CHAIN (1 << 13) +#define HWMOD_OPT_CLKS_NEEDED (1 << 14) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 51d1ecb384bd..ee4e04434a94 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1298,6 +1298,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = { }; /* + * 'mcasp' class + * + */ +static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = { + .sysc_offs = 0x0004, + .sysc_flags = SYSC_HAS_SIDLEMODE, + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type3, +}; + +static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = { + .name = "mcasp", + .sysc = &dra7xx_mcasp_sysc, +}; + +/* mcasp3 */ +static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = { + { .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" }, +}; + +static struct omap_hwmod dra7xx_mcasp3_hwmod = { + .name = "mcasp3", + .class = &dra7xx_mcasp_hwmod_class, + .clkdm_name = "l4per2_clkdm", + .main_clk = "mcasp3_aux_gfclk_mux", + .flags = HWMOD_OPT_CLKS_NEEDED, + .prcm = { + .omap4 = { + .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET, + .context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .opt_clks = mcasp3_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(mcasp3_opt_clks), +}; + +/* * 'mmc' class * */ @@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; +/* l4_per2 -> mcasp3 */ +static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = { + .master = &dra7xx_l4_per2_hwmod, + .slave = &dra7xx_mcasp3_hwmod, + .clk = "l4_root_clk_div", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +/* l3_main_1 -> mcasp3 */ +static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = { + .master = &dra7xx_l3_main_1_hwmod, + .slave = &dra7xx_mcasp3_hwmod, + .clk = "l3_iclk_div", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + /* l4_per1 -> elm */ static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = { .master = &dra7xx_l4_per1_hwmod, @@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { &dra7xx_l4_wkup__dcan1, &dra7xx_l4_per2__dcan2, &dra7xx_l4_per2__cpgmac0, + &dra7xx_l4_per2__mcasp3, + &dra7xx_l3_main_1__mcasp3, &dra7xx_gmac__mdio, &dra7xx_l4_cfg__dma_system, &dra7xx_l3_main_1__dss, diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c index b1288f56d509..6256052893ec 100644 --- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c @@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = { .name = "l4_ls", .clkdm_name = "alwon_l3s_clkdm", .class = &l4_hwmod_class, + .flags = HWMOD_NO_IDLEST, }; /* @@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = { .name = "l4_hs", .clkdm_name = "alwon_l3_med_clkdm", .class = &l4_hwmod_class, + .flags = HWMOD_NO_IDLEST, }; /* L3 slow -> L4 ls peripheral interface running at 125MHz */ @@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = { .name = "emac0", .clkdm_name = "alwon_ethernet_clkdm", .class = &dm816x_emac_hwmod_class, + .flags = HWMOD_NO_IDLEST, }; static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { diff --git 
a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 1dfe34654c43..58144779dec4 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -24,9 +24,6 @@ #include <linux/platform_data/iommu-omap.h> #include <linux/platform_data/wkup_m3.h> -#include <asm/siginfo.h> -#include <asm/signal.h> - #include "common.h" #include "common-board-devices.h" #include "dss-common.h" @@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void) } #endif /* CONFIG_ARCH_OMAP3 */ -#ifdef CONFIG_SOC_TI81XX -static int fault_fixed_up; - -static int t410_abort_handler(unsigned long addr, unsigned int fsr, - struct pt_regs *regs) -{ - if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) { - pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n", - addr, fsr); - fault_fixed_up = 1; - return 0; - } - - return 1; -} - -static void __init t410_abort_init(void) -{ - hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR, - "imprecise external abort"); -} -#endif - #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) static struct iommu_platform_data omap4_iommu_pdata = { .reset_name = "mmu_cache", @@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = { { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, }, { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, }, #endif -#ifdef CONFIG_SOC_TI81XX - { "hp,t410", t410_abort_init, }, -#endif #ifdef CONFIG_SOC_OMAP5 { "ti,omap5-uevm", omap5_uevm_legacy_init, }, #endif diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 87b98bf92366..2dbd3785ee6f 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -301,11 +301,11 @@ static void omap3_pm_idle(void) if (omap_irq_pending()) return; - trace_cpu_idle(1, smp_processor_id()); + trace_cpu_idle_rcuidle(1, smp_processor_id()); omap_sram_idle(); - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } #ifdef CONFIG_SUSPEND diff --git a/arch/arm/mach-orion5x/include/mach/entry-macro.S b/arch/arm/mach-orion5x/include/mach/entry-macro.S index 79eb502a1e64..73919a36b577 100644 --- a/arch/arm/mach-orion5x/include/mach/entry-macro.S +++ b/arch/arm/mach-orion5x/include/mach/entry-macro.S @@ -21,5 +21,5 @@ @ find cause bits that are unmasked ands \irqstat, \irqstat, \tmp @ clear Z flag if any clzne \irqnr, \irqstat @ calc irqnr - rsbne \irqnr, \irqnr, #31 + rsbne \irqnr, \irqnr, #32 .endm diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 13eba2b26e0a..8fbfb10047ec 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c @@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd) { palm_bl_power = bl; palm_lcd_power = lcd; - pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup)); + pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup)); platform_device_register(&palm27x_backlight); } #endif diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c index aebf6de62468..0b5c3876720c 100644 --- a/arch/arm/mach-pxa/palmtc.c +++ b/arch/arm/mach-pxa/palmtc.c @@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {} #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) static struct pwm_lookup palmtc_pwm_lookup[] = { PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS, - PWM_PERIOD_NORMAL), + PWM_POLARITY_NORMAL), }; static struct platform_pwm_backlight_data 
palmtc_backlight_data = { diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c index 1d2825cb7a65..5fce87f7f254 100644 --- a/arch/arm/mach-shmobile/setup-r8a7793.c +++ b/arch/arm/mach-shmobile/setup-r8a7793.c @@ -19,7 +19,7 @@ #include "common.h" #include "rcar-gen2.h" -static const char *r8a7793_boards_compat_dt[] __initconst = { +static const char * const r8a7793_boards_compat_dt[] __initconst = { "renesas,r8a7793", NULL, }; diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig index 7fdc5bf24f9b..446334a25cf5 100644 --- a/arch/arm/mach-zx/Kconfig +++ b/arch/arm/mach-zx/Kconfig @@ -13,7 +13,7 @@ config SOC_ZX296702 select ARM_GLOBAL_TIMER select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP - select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS if PM help Support for ZTE ZX296702 SoC which is a dual core CortexA9MP endif diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9ac16a482ff1..871f21783866 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -49,7 +49,7 @@ config ARM64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP + select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK @@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075 If unsure, say Y. +config ARM64_ERRATUM_834220 + bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault" + depends on KVM + default y + help + This option adds an alternative code sequence to work around ARM + erratum 834220 on Cortex-A57 parts up to r1p2. + + Affected Cortex-A57 parts might report a Stage 2 translation + fault as the result of a Stage 1 fault for load crossing a + page boundary when there is a permission or device memory + alignment fault at Stage 1 and a translation fault at Stage 2. + + The workaround is to verify that the Stage 1 translation + doesn't generate a fault before handling the Stage 2 fault. + Please note that this does not necessarily enable the workaround, + as it depends on the alternative framework, which will only patch + the kernel if an affected CPU is detected. + + If unsure, say Y. + config ARM64_ERRATUM_845719 bool "Cortex-A53: 845719: a load might read incorrect data" depends on COMPAT diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 11d5bb0fdd54..8f271b83f910 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -29,8 +29,9 @@ #define ARM64_HAS_PAN 4 #define ARM64_HAS_LSE_ATOMICS 5 #define ARM64_WORKAROUND_CAVIUM_23154 6 +#define ARM64_WORKAROUND_834220 7 -#define ARM64_NCAPS 7 +#define ARM64_NCAPS 8 #ifndef __ASSEMBLY__ @@ -46,8 +47,12 @@ enum ftr_type { #define FTR_STRICT true /* SANITY check strict matching required */ #define FTR_NONSTRICT false /* SANITY check ignored */ +#define FTR_SIGNED true /* Value should be treated as signed */ +#define FTR_UNSIGNED false /* Value should be treated as unsigned */ + struct arm64_ftr_bits { - bool strict; /* CPU Sanity check: strict matching required ? */ + bool sign; /* Value is signed ? */ + bool strict; /* CPU Sanity check: strict matching required ? 
*/ enum ftr_type type; u8 shift; u8 width; @@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field) return cpuid_feature_extract_field_width(features, field, 4); } +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) +{ + return (u64)(features << (64 - width - field)) >> (64 - width); +} + +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field(u64 features, int field) +{ + return cpuid_feature_extract_unsigned_field_width(features, field, 4); +} + static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); @@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) { - return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width); + return ftrp->sign ? + cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) : + cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width); } static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index e54415ec6935..9732908bfc8a 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp; /* Determine number of BRP registers available. */ static inline int get_num_brps(void) { + u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); return 1 + - cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_BRPS_SHIFT); } /* Determine number of WRP registers available. */ static inline int get_num_wrps(void) { + u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); return 1 + - cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_WRPS_SHIFT); } diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index 23eb450b820b..8e8d30684392 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -7,4 +7,9 @@ struct pt_regs; extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); +static inline int nr_legacy_irqs(void) +{ + return 0; +} + #endif diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 17e92f05b1fe..3ca894ecf699 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; } +/* + * vcpu_reg should always be passed a register number coming from a + * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32 + * with banked registers. 
+ */ static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) { - if (vcpu_mode_is_32bit(vcpu)) - return vcpu_reg32(vcpu, reg_num); - return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; } diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 24926f2504f7..feb6b4efa641 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { (1 << MIDR_VARIANT_SHIFT) | 2), }, #endif +#ifdef CONFIG_ARM64_ERRATUM_834220 + { + /* Cortex-A57 r0p0 - r1p2 */ + .desc = "ARM erratum 834220", + .capability = ARM64_WORKAROUND_834220, + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, + (1 << MIDR_VARIANT_SHIFT) | 2), + }, +#endif #ifdef CONFIG_ARM64_ERRATUM_845719 { /* Cortex-A53 r0p[01234] */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c8cf89223b5a..0669c63281ea 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); -#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ +#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ + .sign = SIGNED, \ .strict = STRICT, \ .type = TYPE, \ .shift = SHIFT, \ @@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); .safe_val = SAFE_VAL, \ } +/* Define a feature with signed values */ +#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +/* Define a feature with unsigned value */ +#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + #define ARM64_FTR_END \ { \ .width = 0, \ @@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static struct arm64_ftr_bits ftr_ctr[] = { - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ /* * Linux can handle differing I-cache policies. 
Userspace JITs will * make use of *minLine */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ ARM64_FTR_END, }; @@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = { static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index fc5508e0df57..4eeb17198cfa 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -127,7 +127,11 @@ static int __init uefi_init(void) table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; config_tables = early_memremap(efi_to_phys(efi.systab->tables), table_size); - + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sizeof(efi_config_table_64_t), NULL); @@ -209,6 +213,14 @@ void __init efi_init(void) PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); memmap.phys_map = params.mmap; memmap.map = early_memremap(params.mmap, params.mmap_size); + if (memmap.map == NULL) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. 
+ */ + panic("Unable to map EFI memory map.\n"); + } memmap.map_end = memmap.map + params.mmap_size; memmap.desc_size = params.desc_size; memmap.desc_version = params.desc_ver; @@ -227,7 +239,6 @@ static bool __init efi_virtmap_init(void) init_new_context(NULL, &efi_mm); for_each_efi_memory_desc(&memmap, md) { - u64 paddr, npages, size; pgprot_t prot; if (!(md->attribute & EFI_MEMORY_RUNTIME)) @@ -235,11 +246,6 @@ static bool __init efi_virtmap_init(void) if (md->virt_addr == 0) return false; - paddr = md->phys_addr; - npages = md->num_pages; - memrange_efi_to_native(&paddr, &npages); - size = npages << PAGE_SHIFT; - pr_info(" EFI remap 0x%016llx => %p\n", md->phys_addr, (void *)md->virt_addr); @@ -256,7 +262,8 @@ static bool __init efi_virtmap_init(void) else prot = PAGE_KERNEL; - create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, + create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, __pgprot(pgprot_val(prot) | PTE_NG)); } return true; @@ -273,12 +280,12 @@ static int __init arm64_enable_runtime_services(void) if (!efi_enabled(EFI_BOOT)) { pr_info("EFI services will not be available.\n"); - return -1; + return 0; } if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); - return -1; + return 0; } pr_info("Remapping and enabling EFI services.\n"); @@ -288,7 +295,7 @@ static int __init arm64_enable_runtime_services(void) mapsize); if (!memmap.map) { pr_err("Failed to remap EFI memory map\n"); - return -1; + return -ENOMEM; } memmap.map_end = memmap.map + mapsize; efi.memmap = &memmap; @@ -297,13 +304,13 @@ static int __init arm64_enable_runtime_services(void) sizeof(efi_system_table_t)); if (!efi.systab) { pr_err("Failed to remap EFI System Table\n"); - return -1; + return -ENOMEM; } set_bit(EFI_SYSTEM_TABLES, &efi.flags); if (!efi_virtmap_init()) { pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); - return -1; + return -ENOMEM; } /* Set up runtime services function pointers */ diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 1599701ef044..86c289832272 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context) ENDPROC(__kvm_flush_vm_context) __kvm_hyp_panic: + // Stash PAR_EL1 before corrupting it in __restore_sysregs + mrs x0, par_el1 + push x0, xzr + // Guess the context by looking at VTTBR: // If zero, then we're already a host. // Otherwise restore a minimal host context before panicing. @@ -898,7 +902,7 @@ __kvm_hyp_panic: mrs x3, esr_el2 mrs x4, far_el2 mrs x5, hpfar_el2 - mrs x6, par_el1 + pop x6, xzr // active context PAR_EL1 mrs x7, tpidr_el2 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ @@ -914,7 +918,7 @@ __kvm_hyp_panic: ENDPROC(__kvm_hyp_panic) __hyp_panic_str: - .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" + .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" .align 2 @@ -1015,9 +1019,15 @@ el1_trap: b.ne 1f // Not an abort we care about /* This is an abort. Check for permission fault */ +alternative_if_not ARM64_WORKAROUND_834220 and x2, x1, #ESR_ELx_FSC_TYPE cmp x2, #FSC_PERM b.ne 1f // Not a permission fault +alternative_else + nop // Use the permission fault path to + nop // check for a valid S1 translation, + nop // regardless of the ESR value. 
+alternative_endif /* * Check for Stage-1 page table walk, which is guaranteed diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 85c57158dcd9..648112e90ed5 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) /* Note: These now point to the banked copies */ *vcpu_spsr(vcpu) = new_spsr_value; - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; + *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; /* Branch to exception vector */ if (sctlr & (1 << 13)) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index f636a2639f03..e87f53ff5f58 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu) __flush_icache_all(); } -static int is_reserved_asid(u64 asid) +static bool check_update_reserved_asid(u64 asid, u64 newasid) { int cpu; - for_each_possible_cpu(cpu) - if (per_cpu(reserved_asids, cpu) == asid) - return 1; - return 0; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_asids, cpu) == asid) { + hit = true; + per_cpu(reserved_asids, cpu) = newasid; + } + } + + return hit; } static u64 new_context(struct mm_struct *mm, unsigned int cpu) @@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) u64 generation = atomic64_read(&asid_generation); if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK); + /* * If our current ASID was active during a rollover, we * can continue to use it and this was just a false alarm. 
*/ - if (is_reserved_asid(asid)) - return generation | (asid & ~ASID_MASK); + if (check_update_reserved_asid(asid, newasid)) + return newasid; /* * We had a valid ASID in a previous life, so try to re-use @@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) */ asid &= ~ASID_MASK; if (!__test_and_set_bit(asid, asid_map)) - goto bump_gen; + return newasid; } /* @@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - -bump_gen: - asid |= generation; - return asid; + return asid | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 19211c4a8911..92ddac1e8ca2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -393,16 +393,16 @@ static struct fault_info { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, - { do_bad, SIGBUS, 0, "reserved access flag fault" }, + { do_bad, SIGBUS, 0, "unknown 8" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, - { do_bad, SIGBUS, 0, "reserved permission fault" }, + { do_bad, SIGBUS, 0, "unknown 12" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_bad, SIGBUS, 0, "synchronous external abort" }, - { do_bad, SIGBUS, 0, "asynchronous external abort" }, + { do_bad, SIGBUS, 0, "unknown 17" }, { do_bad, SIGBUS, 0, "unknown 18" }, { do_bad, SIGBUS, 0, "unknown 19" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, @@ -410,16 +410,16 @@ static struct fault_info { { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error" }, - { do_bad, SIGBUS, 0, "asynchronous parity error" }, + { do_bad, SIGBUS, 0, "unknown 25" }, { do_bad, SIGBUS, 0, "unknown 26" }, { do_bad, SIGBUS, 0, "unknown 27" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, { do_bad, SIGBUS, 0, "unknown 32" }, { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, - { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGBUS, 0, "unknown 34" }, { do_bad, SIGBUS, 0, "unknown 35" }, { do_bad, SIGBUS, 0, "unknown 36" }, { do_bad, SIGBUS, 0, "unknown 37" }, @@ -433,21 +433,21 @@ static struct fault_info { { do_bad, SIGBUS, 0, "unknown 45" }, { do_bad, SIGBUS, 0, "unknown 46" }, { do_bad, SIGBUS, 0, "unknown 47" }, - { do_bad, SIGBUS, 0, "unknown 48" }, + { do_bad, SIGBUS, 0, "TLB conflict abort" }, { 
do_bad, SIGBUS, 0, "unknown 49" }, { do_bad, SIGBUS, 0, "unknown 50" }, { do_bad, SIGBUS, 0, "unknown 51" }, { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, - { do_bad, SIGBUS, 0, "unknown 53" }, + { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" }, { do_bad, SIGBUS, 0, "unknown 54" }, { do_bad, SIGBUS, 0, "unknown 55" }, { do_bad, SIGBUS, 0, "unknown 56" }, { do_bad, SIGBUS, 0, "unknown 57" }, - { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, + { do_bad, SIGBUS, 0, "unknown 58" }, { do_bad, SIGBUS, 0, "unknown 59" }, { do_bad, SIGBUS, 0, "unknown 60" }, - { do_bad, SIGBUS, 0, "unknown 61" }, - { do_bad, SIGBUS, 0, "unknown 62" }, + { do_bad, SIGBUS, 0, "section domain fault" }, + { do_bad, SIGBUS, 0, "page domain fault" }, { do_bad, SIGBUS, 0, "unknown 63" }, }; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index abb66f84d4ac..873e363048c6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot); static void __init *early_alloc(unsigned long sz) { - void *ptr = __va(memblock_alloc(sz, sz)); - BUG_ON(!ptr); + phys_addr_t phys; + void *ptr; + + phys = memblock_alloc(sz, sz); + BUG_ON(!phys); + ptr = __va(phys); memset(ptr, 0, sz); return ptr; } @@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) do { /* * Need to have the least restrictive permissions available - * permissions will be fixed up later. Default the new page - * range as contiguous ptes. + * permissions will be fixed up later */ - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); pfn++; } while (pte++, i++, i < PTRS_PER_PTE); } -/* - * Given a PTE with the CONT bit set, determine where the CONT range - * starts, and clear the entire range of PTE CONT bits. - */ -static void clear_cont_pte_range(pte_t *pte, unsigned long addr) -{ - int i; - - pte -= CONT_RANGE_OFFSET(addr); - for (i = 0; i < CONT_PTES; i++) { - set_pte(pte, pte_mknoncont(*pte)); - pte++; - } - flush_tlb_all(); -} - -/* - * Given a range of PTEs set the pfn and provided page protection flags - */ -static void __populate_init_pte(pte_t *pte, unsigned long addr, - unsigned long end, phys_addr_t phys, - pgprot_t prot) -{ - unsigned long pfn = __phys_to_pfn(phys); - - do { - /* clear all the bits except the pfn, then apply the prot */ - set_pte(pte, pfn_pte(pfn, prot)); - pte++; - pfn++; - addr += PAGE_SIZE; - } while (addr != end); -} - static void alloc_init_pte(pmd_t *pmd, unsigned long addr, - unsigned long end, phys_addr_t phys, + unsigned long end, unsigned long pfn, pgprot_t prot, void *(*alloc)(unsigned long size)) { pte_t *pte; - unsigned long next; if (pmd_none(*pmd) || pmd_sect(*pmd)) { pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); @@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, pte = pte_offset_kernel(pmd, addr); do { - next = min(end, (addr + CONT_SIZE) & CONT_MASK); - if (((addr | next | phys) & ~CONT_MASK) == 0) { - /* a block of CONT_PTES */ - __populate_init_pte(pte, addr, next, phys, - __pgprot(pgprot_val(prot) | PTE_CONT)); - } else { - /* - * If the range being split is already inside of a - * contiguous range but this PTE isn't going to be - * contiguous, then we want to unmark the adjacent - * ranges, then update the portion of the range we - * are interrested in. 
- */ - clear_cont_pte_range(pte, addr); - __populate_init_pte(pte, addr, next, phys, prot); - } - - pte += (next - addr) >> PAGE_SHIFT; - phys += next - addr; - addr = next; - } while (addr != end); + set_pte(pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); } static void split_pud(pud_t *old_pud, pmd_t *pmd) @@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, } } } else { - alloc_init_pte(pmd, addr, next, phys, prot, alloc); + alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), + prot, alloc); } phys += next - addr; } while (pmd++, addr = next, addr != end); diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c index f7836c6a6b60..c32f76791f48 100644 --- a/arch/m68k/coldfire/m54xx.c +++ b/arch/m68k/coldfire/m54xx.c @@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void) memstart = PAGE_ALIGN(_ramstart); min_low_pfn = PFN_DOWN(_rambase); start_pfn = PFN_DOWN(memstart); - max_low_pfn = PFN_DOWN(_ramend); + max_pfn = max_low_pfn = PFN_DOWN(_ramend); high_memory = (void *)_ramend; m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 0793a7f17417..f9d96bf86910 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 375 +#define NR_syscalls 376 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 5e6fae6c275f..36cf129de663 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -380,5 +380,6 @@ #define __NR_sendmmsg 372 #define __NR_userfaultfd 373 #define __NR_membarrier 374 +#define __NR_mlock2 375 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 88c27d94a721..76b9113f3092 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p) * Give all the memory to the bootmap allocator, tell it to put the * boot mem_map at the start of memory. 
*/ + min_low_pfn = PFN_DOWN(memory_start); + max_pfn = max_low_pfn = PFN_DOWN(memory_end); + bootmap_size = init_bootmem_node( NODE_DATA(0), - memory_start >> PAGE_SHIFT, /* map goes here */ - PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ - memory_end >> PAGE_SHIFT); + min_low_pfn, /* map goes here */ + PFN_DOWN(PAGE_OFFSET), + max_pfn); /* * Free the usable memory, we have to make sure we do not free * the bootmem bitmap so we then reserve it after freeing it :-) diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 5dd0e80042f5..282cd903f4c4 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -395,3 +395,4 @@ ENTRY(sys_call_table) .long sys_sendmmsg .long sys_userfaultfd .long sys_membarrier + .long sys_mlock2 /* 375 */ diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index b958916e5eac..8f37fdd80be9 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -250,7 +250,7 @@ void __init paging_init(void) high_memory = phys_to_virt(max_addr); min_low_pfn = availmem >> PAGE_SHIFT; - max_low_pfn = max_addr >> PAGE_SHIFT; + max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT; for (i = 0; i < m68k_num_memory; i++) { addr = m68k_memory[i].addr; diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c index a8b942bf7163..2a5f43a68ae3 100644 --- a/arch/m68k/sun3/config.c +++ b/arch/m68k/sun3/config.c @@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start, memory_end = memory_end & PAGE_MASK; start_page = __pa(memory_start) >> PAGE_SHIFT; - num_pages = __pa(memory_end) >> PAGE_SHIFT; + max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT; high_memory = (void *)memory_end; availmem = memory_start; m68k_setup_node(0); - availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); + availmem += init_bootmem(start_page, num_pages); availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; free_bootmem(__pa(availmem), memory_end - (availmem)); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d5fa3eaf39a1..41b1b090f56f 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, base = (inst >> 21) & 0x1f; op_inst = (inst >> 16) & 0x1f; - offset = inst & 0xffff; + offset = (int16_t)inst; cache = (inst >> 16) & 0x3; op = (inst >> 18) & 0x7; diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 7bab3a4e8f7d..7e2210846b8b 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S @@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run) FEXPORT(__kvm_mips_load_asid) /* Set the ASID for the Guest Kernel */ - INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ - /* addresses shift to 0x80000000 */ - bltz t0, 1f /* If kernel */ + PTR_L t0, VCPU_COP0(k1) + LONG_L t0, COP0_STATUS(t0) + andi t0, KSU_USER | ST0_ERL | ST0_EXL + xori t0, KSU_USER + bnez t0, 1f /* If kernel */ INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 1: @@ -474,9 +476,11 @@ __kvm_mips_return_to_guest: mtc0 t0, CP0_EPC /* Set the ASID for the Guest Kernel */ - INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ - /* addresses shift to 0x80000000 */ - bltz t0, 1f /* If kernel */ + PTR_L t0, VCPU_COP0(k1) + LONG_L t0, COP0_STATUS(t0) + andi t0, KSU_USER | ST0_ERL | ST0_EXL + xori t0, KSU_USER + bnez t0, 1f /* If kernel */ INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else 
user */ 1: diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 49ff3bfc007e..b9b803facdbf 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (!gebase) { err = -ENOMEM; - goto out_free_cpu; + goto out_uninit_cpu; } kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); @@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) out_free_gebase: kfree(gebase); +out_uninit_cpu: + kvm_vcpu_uninit(vcpu); + out_free_cpu: kfree(vcpu); diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index 8a978022630b..dbbeccc3d714 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -11,6 +11,7 @@ * by the Free Software Foundation. */ +#include <linux/delay.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/io.h> @@ -232,8 +233,7 @@ static int rt288x_pci_probe(struct platform_device *pdev) ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); - for (i = 0; i < 0xfffff; i++) - ; + udelay(1); rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c index 4f925e06c414..78b2ef49dbc7 100644 --- a/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/arch/mips/pmcs-msp71xx/msp_setup.c @@ -10,6 +10,8 @@ * option) any later version. */ +#include <linux/delay.h> + #include <asm/bootinfo.h> #include <asm/cacheflush.h> #include <asm/idle.h> @@ -77,7 +79,7 @@ void msp7120_reset(void) */ /* Wait a bit for the DDRC to settle */ - for (i = 0; i < 100000000; i++); + mdelay(125); #if defined(CONFIG_PMC_MSP7120_GW) /* diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c index 244f9427625b..db8f88b6a3af 100644 --- a/arch/mips/sni/reset.c +++ b/arch/mips/sni/reset.c @@ -3,6 +3,8 @@ * * Reset a SNI machine. 
*/ +#include <linux/delay.h> + #include <asm/io.h> #include <asm/reboot.h> #include <asm/sni.h> @@ -32,9 +34,9 @@ void sni_machine_restart(char *command) for (;;) { for (i = 0; i < 100; i++) { kb_wait(); - for (j = 0; j < 100000 ; j++) - /* nothing */; + udelay(50); outb_p(0xfe, 0x64); /* pulse reset low */ + udelay(50); } } } diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 223cdcc8203f..87bf88ed04c6 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c @@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end) end += (cpuinfo.dcache_line_size - 1); end &= ~(cpuinfo.dcache_line_size - 1); - for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { - __asm__ __volatile__ (" flushda 0(%0)\n" - : /* Outputs */ - : /* Inputs */ "r"(addr) - /* : No clobber */); - } -} - -static void __flush_dcache_all(unsigned long start, unsigned long end) -{ - unsigned long addr; - - start &= ~(cpuinfo.dcache_line_size - 1); - end += (cpuinfo.dcache_line_size - 1); - end &= ~(cpuinfo.dcache_line_size - 1); - if (end > start + cpuinfo.dcache_size) end = start + cpuinfo.dcache_size; @@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page) void flush_cache_all(void) { - __flush_dcache_all(0, cpuinfo.dcache_size); + __flush_dcache(0, cpuinfo.dcache_size); __flush_icache(0, cpuinfo.icache_size); } @@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) */ unsigned long start = (unsigned long)page_address(page); - __flush_dcache_all(start, start + PAGE_SIZE); + __flush_dcache(start, start + PAGE_SIZE); } void flush_dcache_page(struct page *page) @@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, { flush_cache_page(vma, user_vaddr, page_to_pfn(page)); memcpy(dst, src, len); - __flush_dcache_all((unsigned long)src, (unsigned long)src + len); + __flush_dcache((unsigned long)src, (unsigned long)src + len); if (vma->vm_flags & VM_EXEC) __flush_icache((unsigned long)src, (unsigned long)src + len); } @@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, { flush_cache_page(vma, user_vaddr, page_to_pfn(page)); memcpy(dst, src, len); - __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); + __flush_dcache((unsigned long)dst, (unsigned long)dst + len); if (vma->vm_flags & VM_EXEC) __flush_icache((unsigned long)dst, (unsigned long)dst + len); } diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a908ada8e0a5..2220f7a60def 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -108,6 +108,7 @@ #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? 
*/ +#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */ +#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 75b6676c1a0b..646bf4d222c1 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr, msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; } + /* + * Use the current MSR TM suspended bit to track if we have + * checkpointed state outstanding. + * On signal delivery, we'd normally reclaim the checkpointed + * state to obtain stack pointer (see:get_tm_stackpointer()). + * This will then directly return to userspace without going + * through __switch_to(). However, if the stack frame is bad, + * we need to exit this thread which calls __switch_to() which + * will again attempt to reclaim the already saved tm state. + * Hence we need to check that we've not already reclaimed + * this state. + * We do this using the current MSR, rather than tracking it in + * some specific thread_struct bit, as it has the additional + * benefit of checking for a potential TM bad thing exception. + */ + if (!MSR_TM_SUSPENDED(mfmsr())) + return; + tm_reclaim(thr, thr->regs->msr, cause); /* Having done the reclaim, we now have the checkpointed diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 0dbee465af7a..ef7c24e84a62 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs, return 1; #endif /* CONFIG_SPE */ + /* Get the top half of the MSR from the user context */ + if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) + return 1; + msr_hi <<= 32; + /* If TM bits are set to the reserved value, it's an invalid context */ + if (MSR_TM_RESV(msr_hi)) + return 1; + /* Pull in the MSR TM bits from the user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. After recheckpointing, the * transactional versions should be loaded. @@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); - /* Get the top half of the MSR */ - if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) - return 1; - /* Pull in MSR TM from user context */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 20756dfb9f34..c676ecec0869 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + /* Don't allow reserved mode.
*/ + if (MSR_TM_RESV(msr)) + return -EINVAL; + /* pull in MSR TM from user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 373e32346d68..6a75352f453c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) src_id, 0); /* sending vcpu invalid */ - if (src_id >= KVM_MAX_VCPUS || - kvm_get_vcpu(vcpu->kvm, src_id) == NULL) + if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) return -EINVAL; if (sclp.has_sigpif) @@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, irq->u.emerg.code, 0); + /* sending vcpu invalid */ + if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) + return -EINVAL; + set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); atomic_or(CPUSTAT_EXT_INT, li->cpuflags); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8fe2f1c722dc..846589281b04 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = 0; break; case KVM_CAP_S390_VECTOR_REGISTERS: - if (MACHINE_HAS_VX) { + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus)) { + r = -EBUSY; + } else if (MACHINE_HAS_VX) { set_kvm_facility(kvm->arch.model.fac->mask, 129); set_kvm_facility(kvm->arch.model.fac->list, 129); r = 0; } else r = -EINVAL; + mutex_unlock(&kvm->lock); VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", r ? "(not available)" : "(success)"); break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 77191b85ea7a..d76b51cb4b62 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); - if (!MACHINE_HAS_PFMF) + if (!test_kvm_facility(vcpu->kvm, 8)) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index da690b69f9fe..77c22d685c7a 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, u16 cpu_addr, u32 parameter, u64 *status_reg) { int rc; - struct kvm_vcpu *dst_vcpu; + struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); if (!dst_vcpu) return SIGP_CC_NOT_OPERATIONAL; @@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); if (order_code == SIGP_EXTERNAL_CALL) { - dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); BUG_ON(dest_vcpu == NULL); kvm_s390_vcpu_wakeup(dest_vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 87acc5221740..af823a388c19 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) switch (type) { case VMX_VPID_EXTENT_ALL_CONTEXT: - if (get_vmcs12(vcpu)->virtual_processor_id == 0) { - nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return 1; - } __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu); break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 00462bd63129..eed32283d22c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, return 0; } +static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) +{ + return (!lapic_in_kernel(vcpu) || + kvm_apic_accept_pic_intr(vcpu)); +} + +/* + * if userspace requested an interrupt window, check that the + * interrupt window is open. + * + * No need to exit to userspace if we already have an interrupt queued. + */ +static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) +{ + return kvm_arch_interrupt_allowed(vcpu) && + !kvm_cpu_has_interrupt(vcpu) && + !kvm_event_needs_reinjection(vcpu) && + kvm_cpu_accept_dm_intr(vcpu); +} + static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { @@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, return -EEXIST; vcpu->arch.pending_external_vector = irq->irq; + kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } @@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } -/* - * Check if userspace requested an interrupt window, and that the - * interrupt window is open. - * - * No need to exit to userspace if we already have an interrupt queued. - */ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { - if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm)) - return false; - - if (kvm_cpu_has_interrupt(vcpu)) - return false; - - return (irqchip_split(vcpu->kvm) - ? kvm_apic_accept_pic_intr(vcpu) - : kvm_arch_interrupt_allowed(vcpu)); + return vcpu->run->request_interrupt_window && + likely(!pic_in_kernel(vcpu->kvm)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) @@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) kvm_run->flags = is_smm(vcpu) ? 
KVM_RUN_X86_SMM : 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); - if (!irqchip_in_kernel(vcpu->kvm)) - kvm_run->ready_for_interrupt_injection = - kvm_arch_interrupt_allowed(vcpu) && - !kvm_cpu_has_interrupt(vcpu) && - !kvm_event_needs_reinjection(vcpu); - else if (!pic_in_kernel(vcpu->kvm)) - kvm_run->ready_for_interrupt_injection = - kvm_apic_accept_pic_intr(vcpu) && - !kvm_cpu_has_interrupt(vcpu); - else - kvm_run->ready_for_interrupt_injection = 1; + kvm_run->ready_for_interrupt_injection = + pic_in_kernel(vcpu->kvm) || + kvm_vcpu_ready_for_interrupt_injection(vcpu); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) @@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; - bool req_int_win = !lapic_in_kernel(vcpu) && - vcpu->run->request_interrupt_window; + bool req_int_win = + dm_request_for_irq_injection(vcpu) && + kvm_cpu_accept_dm_intr(vcpu); + bool req_immediate_exit = false; if (vcpu->requests) { @@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu) if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); - if (dm_request_for_irq_injection(vcpu)) { + if (dm_request_for_irq_injection(vcpu) && + kvm_vcpu_ready_for_interrupt_injection(vcpu)) { r = 0; vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; ++vcpu->stat.request_irq_exits; diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c index 7bcf06a7cd12..6eb3c8af96e2 100644 --- a/arch/x86/pci/bus_numa.c +++ b/arch/x86/pci/bus_numa.c @@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) if (!found) pci_add_resource(resources, &info->busn); - list_for_each_entry(root_res, &info->resources, list) { - struct resource *res; - struct resource *root; + list_for_each_entry(root_res, &info->resources, list) + pci_add_resource(resources, &root_res->res); - res = &root_res->res; - pci_add_resource(resources, res); - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - else - root = &iomem_resource; - insert_resource(root, res); - } return; default_resources: diff --git a/block/blk-merge.c b/block/blk-merge.c index de5716d8e525..41a55ba0d78e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, struct bio_vec bv, bvprv, *bvprvp = NULL; struct bvec_iter iter; unsigned seg_size = 0, nsegs = 0, sectors = 0; + unsigned front_seg_size = bio->bi_seg_front_size; + bool do_split = true; + struct bio *new = NULL; bio_for_each_segment(bv, bio, iter) { if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) @@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, seg_size += bv.bv_len; bvprv = bv; - bvprvp = &bv; + bvprvp = &bvprv; sectors += bv.bv_len >> 9; continue; } @@ -108,16 +111,29 @@ new_segment: nsegs++; bvprv = bv; - bvprvp = &bv; + bvprvp = &bvprv; seg_size = bv.bv_len; sectors += bv.bv_len >> 9; + + if (nsegs == 1 && seg_size > front_seg_size) + front_seg_size = seg_size; } - *segs = nsegs; - return NULL; + do_split = false; split: *segs = nsegs; - return bio_split(bio, sectors, GFP_NOIO, bs); + + if (do_split) { + new = bio_split(bio, sectors, GFP_NOIO, bs); + if (new) + bio = new; + } + + bio->bi_seg_front_size = front_seg_size; + if (seg_size > bio->bi_seg_back_size) + bio->bi_seg_back_size = seg_size; + + return do_split ? 
new : NULL; } void blk_queue_split(struct request_queue *q, struct bio **bio, @@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, if (sg) sg_mark_end(sg); + /* + * Something must have been wrong if the computed number of + * segments is bigger than the number of req's physical segments + */ + WARN_ON(nsegs > rq->nr_phys_segments); + return nsegs; } EXPORT_SYMBOL(blk_rq_map_sg); diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ae09de62f19..6d6f8feb48c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_bio_to_request(rq, bio); /* - * we do limited pluging. If bio can be merged, do merge. + * We do limited plugging. If the bio can be merged, do that. * Otherwise the existing request in the plug list will be * issued. So the plug list will have one request at most */ if (plug) { /* * The plug list might get flushed before this. If that - * happens, same_queue_rq is invalid and plug list is empty - **/ + * happens, same_queue_rq is invalid and plug list is + * empty + */ if (same_queue_rq && !list_empty(&plug->mq_list)) { old_rq = same_queue_rq; list_del_init(&old_rq->queuelist); @@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) blk_mq_bio_to_request(rq, bio); if (!request_count) trace_block_plug(q); - else if (request_count >= BLK_MAX_REQUEST_COUNT) { + + blk_mq_put_ctx(data.ctx); + + if (request_count >= BLK_MAX_REQUEST_COUNT) { blk_flush_plug_list(plug, false); trace_block_plug(q); } + list_add_tail(&rq->queuelist, &plug->mq_list); - blk_mq_put_ctx(data.ctx); return cookie; } diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 246dfb16c3d9..aa40aa93381b 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -158,11 +158,13 @@ void blk_abort_request(struct request *req) { if (blk_mark_rq_complete(req)) return; - blk_delete_timer(req); - if (req->q->mq_ops) + + if (req->q->mq_ops) { blk_mq_rq_timed_out(req, false); - else + } else { + blk_delete_timer(req); blk_rq_timed_out(req); + } } EXPORT_SYMBOL_GPL(blk_abort_request); diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 3de89d4690f3..a163c487cf38 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c @@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq, static int noop_dispatch(struct request_queue *q, int force) { struct noop_data *nd = q->elevator->elevator_data; + struct request *rq; - if (!list_empty(&nd->queue)) { - struct request *rq; - rq = list_entry(nd->queue.next, struct request, queuelist); + rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); + if (rq) { list_del_init(&rq->queuelist); elv_dispatch_sort(q, rq); return 1; @@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq) if (rq->queuelist.prev == &nd->queue) return NULL; - return list_entry(rq->queuelist.prev, struct request, queuelist); + return list_prev_entry(rq, queuelist); } static struct request * @@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq) if (rq->queuelist.next == &nd->queue) return NULL; - return list_entry(rq->queuelist.next, struct request, queuelist); + return list_next_entry(rq, queuelist); } static int noop_init_queue(struct request_queue *q, struct elevator_type *e) diff --git a/block/partitions/mac.c b/block/partitions/mac.c index c2c48ec64b27..621317ac4d59 100644 --- a/block/partitions/mac.c +++
b/block/partitions/mac.c @@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) Sector sect; unsigned char *data; int slot, blocks_in_map; - unsigned secsize; + unsigned secsize, datasize, partoffset; #ifdef CONFIG_PPC_PMAC int found_root = 0; int found_root_goodness = 0; @@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) } secsize = be16_to_cpu(md->block_size); put_dev_sector(sect); - data = read_part_sector(state, secsize/512, &sect); + datasize = round_down(secsize, 512); + data = read_part_sector(state, datasize / 512, &sect); if (!data) return -1; - part = (struct mac_partition *) (data + secsize%512); + partoffset = secsize % 512; + if (partoffset + sizeof(*part) > datasize) + return -1; + part = (struct mac_partition *) (data + partoffset); if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { put_dev_sector(sect); return 0; /* not a MacOS disk */ diff --git a/drivers/Makefile b/drivers/Makefile index 73d039156ea7..795d0ca714bf 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ obj-$(CONFIG_PARPORT) += parport/ +obj-$(CONFIG_NVM) += lightnvm/ obj-y += base/ block/ misc/ mfd/ nfc/ obj-$(CONFIG_LIBNVDIMM) += nvdimm/ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ @@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ obj-$(CONFIG_SCSI) += scsi/ -obj-$(CONFIG_NVM) += lightnvm/ obj-y += nvme/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_TARGET_CORE) += target/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 25dbb76c02cc..5eef4cb4f70e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED bool config ACPI_DEBUGGER - bool "In-kernel debugger (EXPERIMENTAL)" + bool "AML debugger interface (EXPERIMENTAL)" select ACPI_DEBUG help - Enable in-kernel debugging facilities: statistics, internal + Enable in-kernel debugging of AML facilities: statistics, internal object dump, single step control method execution. This is still under development, currently enabling this only results in the compilation of the ACPICA debugger files. diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 850d7bf0c873..ae3fe4e64203 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info) else continue; + /* + * Some legacy x86 host bridge drivers use iomem_resource and + * ioport_resource as default resource pool, skip it.
+ */ + if (res == root) + continue; + conflict = insert_resource_conflict(root, res); if (conflict) { dev_info(&info->bridge->dev, diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e03b1ad25a90..167418e73445 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev) } pd = of_genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); if (IS_ERR(pd)) { dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - of_node_put(dev->of_node); return -EPROBE_DEFER; } @@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev) if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); - of_node_put(dev->of_node); goto out; } diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index e60dd12e23aa..1e937ac5f456 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) struct gpd_timing_data *td; s64 constraint_ns; - if (!pdd->dev->driver) - continue; - /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a28a562f7b7f..3457ac8c03e2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd) sector_t capacity; unsigned int index = 0; struct kobject *kobj; - unsigned char thd_name[16]; if (dd->disk) goto skip_create_disk; /* hw init done, before rebuild */ @@ -3958,10 +3957,9 @@ skip_create_disk: } start_service_thread: - sprintf(thd_name, "mtip_svc_thd_%02d", index); dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, - dd, dd->numa_node, "%s", - thd_name); + dd, dd->numa_node, + "mtip_svc_thd_%02d", index); if (IS_ERR(dd->mtip_svc_handler)) { dev_err(&dd->pdev->dev, "service thread failed to start\n"); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6255d1c4bba4..5c8ba5484d86 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/blk-mq.h> #include <linux/hrtimer.h> +#include <linux/lightnvm.h> struct nullb_cmd { struct list_head list; @@ -39,12 +40,14 @@ struct nullb { struct nullb_queue *queues; unsigned int nr_queues; + char disk_name[DISK_NAME_LEN]; }; static LIST_HEAD(nullb_list); static struct mutex lock; static int null_major; static int nullb_indexes; +static struct kmem_cache *ppa_cache; struct completion_queue { struct llist_head list; @@ -119,6 +122,10 @@ static int nr_devices = 2; module_param(nr_devices, int, S_IRUGO); MODULE_PARM_DESC(nr_devices, "Number of devices to register"); +static bool use_lightnvm; +module_param(use_lightnvm, bool, S_IRUGO); +MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); + static int irqmode = NULL_IRQ_SOFTIRQ; static int null_set_irqmode(const char *str, const struct kernel_param *kp) @@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); - del_gendisk(nullb->disk); + if (use_lightnvm) + nvm_unregister(nullb->disk_name); + else + del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); if (queue_mode == NULL_Q_MQ) blk_mq_free_tag_set(&nullb->tag_set); - put_disk(nullb->disk); + if (!use_lightnvm) + put_disk(nullb->disk); 
cleanup_queues(nullb); kfree(nullb); } +#ifdef CONFIG_NVM + +static void null_lnvm_end_io(struct request *rq, int error) +{ + struct nvm_rq *rqd = rq->end_io_data; + struct nvm_dev *dev = rqd->dev; + + dev->mt->end_io(rqd, error); + + blk_put_request(rq); +} + +static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) +{ + struct request *rq; + struct bio *bio = rqd->bio; + + rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); + if (IS_ERR(rq)) + return -ENOMEM; + + rq->cmd_type = REQ_TYPE_DRV_PRIV; + rq->__sector = bio->bi_iter.bi_sector; + rq->ioprio = bio_prio(bio); + + if (bio_has_data(bio)) + rq->nr_phys_segments = bio_phys_segments(q, bio); + + rq->__data_len = bio->bi_iter.bi_size; + rq->bio = rq->biotail = bio; + + rq->end_io_data = rqd; + + blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); + + return 0; +} + +static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) +{ + sector_t size = gb * 1024 * 1024 * 1024ULL; + sector_t blksize; + struct nvm_id_group *grp; + + id->ver_id = 0x1; + id->vmnt = 0; + id->cgrps = 1; + id->cap = 0x3; + id->dom = 0x1; + + id->ppaf.blk_offset = 0; + id->ppaf.blk_len = 16; + id->ppaf.pg_offset = 16; + id->ppaf.pg_len = 16; + id->ppaf.sect_offset = 32; + id->ppaf.sect_len = 8; + id->ppaf.pln_offset = 40; + id->ppaf.pln_len = 8; + id->ppaf.lun_offset = 48; + id->ppaf.lun_len = 8; + id->ppaf.ch_offset = 56; + id->ppaf.ch_len = 8; + + do_div(size, bs); /* convert size to pages */ + do_div(size, 256); /* convert size to pgs per blk */ + grp = &id->groups[0]; + grp->mtype = 0; + grp->fmtype = 0; + grp->num_ch = 1; + grp->num_pg = 256; + blksize = size; + do_div(size, (1 << 16)); + grp->num_lun = size + 1; + do_div(blksize, grp->num_lun); + grp->num_blk = blksize; + grp->num_pln = 1; + + grp->fpg_sz = bs; + grp->csecs = bs; + grp->trdt = 25000; + grp->trdm = 25000; + grp->tprt = 500000; + grp->tprm = 500000; + grp->tbet = 1500000; + grp->tbem = 1500000; + grp->mpos = 0x010101; /* single plane rwe */ + grp->cpar = hw_queue_depth; + + return 0; +} + +static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) +{ + mempool_t *virtmem_pool; + + virtmem_pool = mempool_create_slab_pool(64, ppa_cache); + if (!virtmem_pool) { + pr_err("null_blk: Unable to create virtual memory pool\n"); + return NULL; + } + + return virtmem_pool; +} + +static void null_lnvm_destroy_dma_pool(void *pool) +{ + mempool_destroy(pool); +} + +static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, + gfp_t mem_flags, dma_addr_t *dma_handler) +{ + return mempool_alloc(pool, mem_flags); +} + +static void null_lnvm_dev_dma_free(void *pool, void *entry, + dma_addr_t dma_handler) +{ + mempool_free(entry, pool); +} + +static struct nvm_dev_ops null_lnvm_dev_ops = { + .identity = null_lnvm_id, + .submit_io = null_lnvm_submit_io, + + .create_dma_pool = null_lnvm_create_dma_pool, + .destroy_dma_pool = null_lnvm_destroy_dma_pool, + .dev_dma_alloc = null_lnvm_dev_dma_alloc, + .dev_dma_free = null_lnvm_dev_dma_free, + + /* Simulate nvme protocol restriction */ + .max_phys_sect = 64, +}; +#else +static struct nvm_dev_ops null_lnvm_dev_ops; +#endif /* CONFIG_NVM */ + static int null_open(struct block_device *bdev, fmode_t mode) { return 0; @@ -575,11 +723,6 @@ static int null_add_dev(void) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); - disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { - rv = -ENOMEM; - goto out_cleanup_blk_queue; - }
mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -589,6 +732,21 @@ static int null_add_dev(void) blk_queue_logical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs); + sprintf(nullb->disk_name, "nullb%d", nullb->index); + + if (use_lightnvm) { + rv = nvm_register(nullb->q, nullb->disk_name, + &null_lnvm_dev_ops); + if (rv) + goto out_cleanup_blk_queue; + goto done; + } + + disk = nullb->disk = alloc_disk_node(1, home_node); + if (!disk) { + rv = -ENOMEM; + goto out_cleanup_lightnvm; + } size = gb * 1024 * 1024 * 1024ULL; set_capacity(disk, size >> 9); @@ -598,10 +756,15 @@ static int null_add_dev(void) disk->fops = &null_fops; disk->private_data = nullb; disk->queue = nullb->q; - sprintf(disk->disk_name, "nullb%d", nullb->index); + strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + add_disk(disk); +done: return 0; +out_cleanup_lightnvm: + if (use_lightnvm) + nvm_unregister(nullb->disk_name); out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: @@ -625,6 +788,18 @@ static int __init null_init(void) bs = PAGE_SIZE; } + if (use_lightnvm && bs != 4096) { + pr_warn("null_blk: LightNVM only supports 4k block size\n"); + pr_warn("null_blk: defaults block size to 4k\n"); + bs = 4096; + } + + if (use_lightnvm && queue_mode != NULL_Q_MQ) { + pr_warn("null_blk: LightNVM only supported for blk-mq\n"); + pr_warn("null_blk: defaults queue mode to blk-mq\n"); + queue_mode = NULL_Q_MQ; + } + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { if (submit_queues < nr_online_nodes) { pr_warn("null_blk: submit_queues param is set to %u.", @@ -655,15 +830,27 @@ static int __init null_init(void) if (null_major < 0) return null_major; + if (use_lightnvm) { + ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), + 0, 0, NULL); + if (!ppa_cache) { + pr_err("null_blk: unable to create ppa cache\n"); + return -ENOMEM; + } + } + for (i = 0; i < nr_devices; i++) { if (null_add_dev()) { unregister_blkdev(null_major, "nullb"); - return -EINVAL; + goto err_ppa; } } pr_info("null: module loaded\n"); return 0; +err_ppa: + kmem_cache_destroy(ppa_cache); + return -EINVAL; } static void __exit null_exit(void) @@ -678,6 +865,8 @@ static void __exit null_exit(void) null_del_dev(nullb); } mutex_unlock(&lock); + + kmem_cache_destroy(ppa_cache); } module_init(null_init); diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index 9f1856948758..bf500e0e7362 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = { module_platform_driver(omap_ocp2scp_driver); -MODULE_ALIAS("platform: omap-ocp2scp"); +MODULE_ALIAS("platform:omap-ocp2scp"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP OCP2SCP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index e8cb334094b0..7c0bdfb1a2ca 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->max = cpu->perf_caps.highest_perf; policy->cpuinfo.min_freq = policy->min; policy->cpuinfo.max_freq = policy->max; + policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) cpumask_copy(policy->cpus, cpu->shared_cpu_map); - else { + else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. 
*/ pr_debug("Unsupported CPU co-ord type\n"); return -EFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 306f75700bf8..251b14736de9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -496,6 +496,7 @@ struct amdgpu_bo_va_mapping { /* bo virtual addresses in a specific vm */ struct amdgpu_bo_va { + struct mutex mutex; /* protected by bo being reserved */ struct list_head bo_list; struct fence *last_pt_update; @@ -928,8 +929,6 @@ struct amdgpu_vm_id { }; struct amdgpu_vm { - struct mutex mutex; - struct rb_root va; /* protecting invalidated */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3afcf0237c25..1d44d508d4d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -784,8 +784,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; union drm_amdgpu_cs *cs = data; - struct amdgpu_fpriv *fpriv = filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; int i, r; @@ -803,7 +801,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_handle_lockup(adev, r); return r; } - mutex_lock(&vm->mutex); r = amdgpu_cs_parser_relocs(&parser); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); @@ -888,7 +885,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); - mutex_unlock(&vm->mutex); r = amdgpu_cs_handle_lockup(adev, r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 00c5b580f56c..fc32fc01a64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, false); - if (r) { - mutex_unlock(&vm->mutex); + if (r) return r; - } bo_va = amdgpu_vm_bo_find(vm, rbo); if (!bo_va) { @@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri ++bo_va->ref_count; } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); return 0; } @@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, true); if (r) { - mutex_unlock(&vm->mutex); dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); return; @@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, } } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); } static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) @@ -553,7 +546,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; - mutex_lock(&fpriv->vm.mutex); rbo = gem_to_amdgpu_bo(gobj); INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); @@ -568,7 +560,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) { - mutex_unlock(&fpriv->vm.mutex); drm_gem_object_unreference_unlocked(gobj); return 
r; } @@ -577,7 +568,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (!bo_va) { ttm_eu_backoff_reservation(&ticket, &list); drm_gem_object_unreference_unlocked(gobj); - mutex_unlock(&fpriv->vm.mutex); return -ENOENT; } @@ -602,7 +592,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) amdgpu_gem_va_update_vm(adev, bo_va, args->operation); - mutex_unlock(&fpriv->vm.mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 03f0c3bae516..a745eeeb5d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000030; /* len */ + if ((ring->adev->vce.fw_version >> 24) >= 52) + ib->ptr[ib->length_dw++] = 0x00000040; /* len */ + else + ib->ptr[ib->length_dw++] = 0x00000030; /* len */ ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000042; @@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000100; ib->ptr[ib->length_dw++] = 0x0000000c; ib->ptr[ib->length_dw++] = 0x00000000; + if ((ring->adev->vce.fw_version >> 24) >= 52) { + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + } ib->ptr[ib->length_dw++] = 0x00000014; /* len */ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 159ce54bbd8d..ae037e5b6ad0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -922,8 +922,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - + mutex_lock(&bo_va->mutex); r = amdgpu_vm_bo_update(adev, bo_va, NULL); + mutex_unlock(&bo_va->mutex); if (r) return r; @@ -967,7 +968,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->vm_status); - + mutex_init(&bo_va->mutex); list_add_tail(&bo_va->bo_list, &bo->va); return bo_va; @@ -1045,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; + mutex_lock(&bo_va->mutex); list_add(&mapping->list, &bo_va->invalids); + mutex_unlock(&bo_va->mutex); spin_lock(&vm->it_lock); interval_tree_insert(&mapping->it, &vm->va); spin_unlock(&vm->it_lock); @@ -1121,7 +1124,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; - + mutex_lock(&bo_va->mutex); list_for_each_entry(mapping, &bo_va->valids, list) { if (mapping->it.start == saddr) break; @@ -1135,10 +1138,12 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, break; } - if (&mapping->list == &bo_va->invalids) + if (&mapping->list == &bo_va->invalids) { + mutex_unlock(&bo_va->mutex); return -ENOENT; + } } - + mutex_unlock(&bo_va->mutex); list_del(&mapping->list); spin_lock(&vm->it_lock); 
interval_tree_remove(&mapping->it, &vm->va); @@ -1190,8 +1195,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, spin_unlock(&vm->it_lock); kfree(mapping); } - fence_put(bo_va->last_pt_update); + mutex_destroy(&bo_va->mutex); kfree(bo_va); } @@ -1236,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->ids[i].id = 0; vm->ids[i].flushed_updates = NULL; } - mutex_init(&vm->mutex); vm->va = RB_ROOT; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); @@ -1320,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) fence_put(vm->ids[i].flushed_updates); } - mutex_destroy(&vm->mutex); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6a52db6ad8d7..370c6c9d81c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -40,6 +40,9 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 #define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024) @@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev) /* set BUSY flag */ WREG32_P(mmVCE_STATUS, 1, ~1); - - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + if (adev->asic_type >= CHIP_STONEY) + WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); + else + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, + ~VCE_VCPU_CNTL__CLK_EN_MASK); WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, @@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32(mmVCE_LMI_SWAP_CNTL, 0); WREG32(mmVCE_LMI_SWAP_CNTL1, 0); WREG32(mmVCE_LMI_VM_CTRL, 0); - - WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); + if (adev->asic_type >= CHIP_STONEY) { + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8)); + } else + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); offset = AMDGPU_VCE_FIRMWARE_OFFSET; size = VCE_V3_0_FW_SIZE; WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); @@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: VCE\n"); + + WREG32_P(mmVCE_SYS_INT_STATUS, + VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, + ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + switch (entry->src_data) { case 0: amdgpu_fence_process(&adev->vce.ring[0]); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ea30d6ad4c13..651129f2ec1d 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -30,8 +30,7 @@ #define CREATE_TRACE_POINTS #include "gpu_sched_trace.h" -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity); +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); struct kmem_cache *sched_fence_slab; @@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, } /** - * Select next job from a specified run queue with round robin policy. - * Return NULL if nothing available. 
+ * Select an entity which could provide a job to run + * + * @rq The run queue to check. + * + * Try to find a ready entity, returns NULL if none found. */ -static struct amd_sched_job * -amd_sched_rq_select_job(struct amd_sched_rq *rq) +static struct amd_sched_entity * +amd_sched_rq_select_entity(struct amd_sched_rq *rq) { struct amd_sched_entity *entity; - struct amd_sched_job *sched_job; spin_lock(&rq->lock); entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } } } list_for_each_entry(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } if (entity == rq->current_entity) @@ -177,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) } /** + * Check if entity is ready + * + * @entity The pointer to a valid scheduler entity + * + * Return true if entity could provide a job. + */ +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) +{ + if (kfifo_is_empty(&entity->job_queue)) + return false; + + if (ACCESS_ONCE(entity->dependency)) + return false; + + return true; +} + +/** * Destroy a context entity * * @sched Pointer to scheduler instance @@ -211,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) amd_sched_wakeup(entity->sched); } +static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) +{ + struct amd_gpu_scheduler *sched = entity->sched; + struct fence * fence = entity->dependency; + struct amd_sched_fence *s_fence; + + if (fence->context == entity->fence_context) { + /* We can ignore fences from ourself */ + fence_put(entity->dependency); + return false; + } + + s_fence = to_amd_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + /* Fence is from the same scheduler */ + if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { + /* Ignore it when it is already scheduled */ + fence_put(entity->dependency); + return false; + } + + /* Wait for fence to be scheduled */ + entity->cb.func = amd_sched_entity_wakeup; + list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); + return true; + } + + if (!fence_add_callback(entity->dependency, &entity->cb, + amd_sched_entity_wakeup)) + return true; + + fence_put(entity->dependency); + return false; +} + static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; struct amd_sched_job *sched_job; - if (ACCESS_ONCE(entity->dependency)) - return NULL; - if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) return NULL; - while ((entity->dependency = sched->ops->dependency(sched_job))) { - - if (entity->dependency->context == entity->fence_context) { - /* We can ignore fences from ourself */ - fence_put(entity->dependency); - continue; - } - - if (fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - fence_put(entity->dependency); - else + while ((entity->dependency = sched->ops->dependency(sched_job))) + if (amd_sched_entity_add_dependency_cb(entity)) return NULL; - } return sched_job; } @@ -304,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) } /** - * 
Select next to run + * Select next entity to process */ -static struct amd_sched_job * -amd_sched_select_job(struct amd_gpu_scheduler *sched) +static struct amd_sched_entity * +amd_sched_select_entity(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *sched_job; + struct amd_sched_entity *entity; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - sched_job = amd_sched_rq_select_job(&sched->kernel_rq); - if (sched_job == NULL) - sched_job = amd_sched_rq_select_job(&sched->sched_rq); + entity = amd_sched_rq_select_entity(&sched->kernel_rq); + if (entity == NULL) + entity = amd_sched_rq_select_entity(&sched->sched_rq); - return sched_job; + return entity; } static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) @@ -381,13 +419,16 @@ static int amd_sched_main(void *param) unsigned long flags; wait_event_interruptible(sched->wake_up_worker, - kthread_should_stop() || - (sched_job = amd_sched_select_job(sched))); + (entity = amd_sched_select_entity(sched)) || + kthread_should_stop()); + if (!entity) + continue; + + sched_job = amd_sched_entity_pop_job(entity); if (!sched_job) continue; - entity = sched_job->s_entity; s_fence = sched_job->s_fence; if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { @@ -400,6 +441,7 @@ static int amd_sched_main(void *param) atomic_inc(&sched->hw_rq_count); fence = sched->ops->run_job(sched_job); + amd_sched_fence_scheduled(s_fence); if (fence) { r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 939692b14f4b..a0f0ae53aacd 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,6 +27,8 @@ #include <linux/kfifo.h> #include <linux/fence.h> +#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS + struct amd_gpu_scheduler; struct amd_sched_rq; @@ -68,6 +70,7 @@ struct amd_sched_rq { struct amd_sched_fence { struct fence base; struct fence_cb cb; + struct list_head scheduled_cb; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; @@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); +void amd_sched_fence_scheduled(struct amd_sched_fence *fence); void amd_sched_fence_signal(struct amd_sched_fence *fence); - #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 8d2130b9ff05..87c78eecea64 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); if (fence == NULL) return NULL; + + INIT_LIST_HEAD(&fence->scheduled_cb); fence->owner = owner; fence->sched = s_entity->sched; spin_lock_init(&fence->lock); @@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence) FENCE_TRACE(&fence->base, "was already signaled\n"); } +void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) +{ + struct fence_cb *cur, *tmp; + + set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); + list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { + list_del_init(&cur->node); + cur->func(&s_fence->base, cur); + } +} + static const char *amd_sched_fence_get_driver_name(struct fence *fence) { 
return "amd_sched"; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index 28bc202f9753..40f845e31272 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h @@ -7,6 +7,7 @@ struct nvkm_instmem { const struct nvkm_instmem_func *func; struct nvkm_subdev subdev; + spinlock_t lock; struct list_head list; u32 reserved; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8b8332e46f24..d5e6938cc6bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, return -ENODEV; } obj = (union acpi_object *)buffer.pointer; + len = min(len, (int)obj->buffer.length); memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 3050042e6c6d..a02813e994ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -39,6 +39,7 @@ #include <nvif/client.h> #include <nvif/device.h> +#include <nvif/ioctl.h> #include <drmP.h> @@ -65,9 +66,10 @@ struct nouveau_drm_tile { }; enum nouveau_drm_object_route { - NVDRM_OBJECT_NVIF = 0, + NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF, NVDRM_OBJECT_USIF, NVDRM_OBJECT_ABI16, + NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY, }; enum nouveau_drm_notify_route { diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 89dc4ce63490..6ae1b3494bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (nvif_unpack(argv->v0, 0, 0, true)) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - argv->v0.owner = NVDRM_OBJECT_USIF; + if (argv->v0.object == 0ULL) + argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ + else + argv->v0.owner = NVDRM_OBJECT_USIF; } else goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index e3c783d0e2ab..caf22b589edc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -279,6 +279,12 @@ nvkm_device_pci_10de_0fe3[] = { }; static const struct nvkm_device_pci_vendor +nvkm_device_pci_10de_0fe4[] = { + { 0x144d, 0xc740, NULL, { .War00C800_0 = true } }, + {} +}; + +static const struct nvkm_device_pci_vendor nvkm_device_pci_10de_104b[] = { { 0x1043, 0x844c, "GeForce GT 625" }, { 0x1043, 0x846b, "GeForce GT 625" }, @@ -689,6 +695,12 @@ nvkm_device_pci_10de_1199[] = { }; static const struct nvkm_device_pci_vendor +nvkm_device_pci_10de_11e0[] = { + { 0x1558, 0x5106, NULL, { .War00C800_0 = true } }, + {} +}; + +static const struct nvkm_device_pci_vendor nvkm_device_pci_10de_11e3[] = { { 0x17aa, 0x3683, "GeForce GTX 760A" }, {} @@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = { { 0x0fe1, "GeForce GT 730M" }, { 0x0fe2, "GeForce GT 745M" }, { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, - { 0x0fe4, "GeForce GT 750M" }, + { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 }, { 0x0fe9, "GeForce GT 750M" }, { 0x0fea, "GeForce GT 755M" }, { 0x0fec, "GeForce 710A" }, @@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = { { 0x11c6, "GeForce GTX 650 Ti" }, { 0x11c8, 
"GeForce GTX 650" }, { 0x11cb, "GeForce GT 740" }, - { 0x11e0, "GeForce GTX 770M" }, + { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 }, { 0x11e1, "GeForce GTX 765M" }, { 0x11e2, "GeForce GTX 765M" }, { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index b5b875928aba..74de7a96c22a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c @@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info) const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; const u32 t = timeslice_mode; const u32 o = PPC_UNIT(gpc, ppc, 0); + if (!(gr->ppc_mask[gpc] & (1 << ppc))) + continue; mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc index 194afe910d21..7dacb3cc0668 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc @@ -52,10 +52,12 @@ mmio_list_base: #endif #ifdef INCLUDE_CODE +#define gpc_addr(reg,addr) /* +*/ imm32(reg,addr) /* +*/ or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE #define gpc_wr32(addr,reg) /* +*/ gpc_addr($r14,addr) /* */ mov b32 $r15 reg /* -*/ imm32($r14, addr) /* -*/ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /* */ call(nv_wr32) // reports an exception to the host @@ -161,7 +163,7 @@ init: #if NV_PGRAPH_GPCX_UNK__SIZE > 0 // figure out which, and how many, UNKs are actually present - imm32($r14, 0x500c30) + gpc_addr($r14, 0x500c30) clear b32 $r2 clear b32 $r3 clear b32 $r4 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h index 64d07df4b8b1..bb820ff28621 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h @@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40126, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf401, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 
0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f008, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x080007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: 
ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h index 2f596433c222..911976d20940 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h @@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40126, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf401, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f008, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 
0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x080007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h index ee8e54db8fc9..1c6e11b05df2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h @@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 
0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40226, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf402, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f030, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x300007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 
0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h index fbcc342f896f..84af7ec6a78e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h @@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = { 0x02020014, 0xf6120040, 0x04bd0002, - 0xfe048141, + 0xfe048441, 0x00400010, 0x0000f607, 0x040204bd, @@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = { 0x01c90080, 0xbd0002f6, 0x0c308e04, - 0xbd24bd50, -/* 0x0383: init_unk_loop */ - 0x7e44bd34, - 0xb0000065, - 0x0bf400f6, - 0xbb010f0e, - 0x4ffd04f2, - 0x0130b605, -/* 0x0398: init_unk_next */ - 0xb60120b6, - 0x26b004e0, - 0xe21bf401, -/* 0x03a4: init_unk_done */ - 0xb50703b5, - 0x00820804, - 0x22cf0201, - 0x9534bd00, - 0x00800825, - 0x05f601c0, - 0x8004bd00, - 0xf601c100, - 0x04bd0005, - 0x98000e98, - 0x207e010f, - 0x2fbb0001, - 0x003fbb00, - 0x98010e98, - 0x207e020f, - 0x0e980001, - 0x00effd05, - 0xbb002ebb, - 0x0e98003e, - 0x030f9802, - 0x0001207e, - 0xfd070e98, - 0x2ebb00ef, - 0x003ebb00, - 0x800235b6, - 0xf601d300, - 0x04bd0003, - 0xb60825b6, - 0x20b60635, - 0x0130b601, - 0xb60824b6, - 0x2fb20834, - 0x0002687e, - 0xbb002fbb, - 0x0080003f, - 0x03f60201, - 0xbd04bd00, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, -/* 0x0445: main */ - 0x0031f404, - 0x0d0028f4, - 0x00377e24, - 0xf401f400, - 0xf404e4b0, - 0x81fe1d18, - 0xbd060201, - 0x0412fd20, - 0xfd01e4b6, - 0x18fe051e, - 0x05187e00, - 0xd40ef400, -/* 0x0474: main_not_ctx_xfer */ - 0xf010ef94, - 0xf87e01f5, - 0x0ef40002, -/* 0x0481: ih */ - 0xfe80f9c7, - 0x80f90188, - 0xa0f990f9, - 0xd0f9b0f9, - 0xf0f9e0f9, - 0x004a04bd, - 0x00aacf02, - 0xf404abc4, - 0x240d1f0b, - 0xcf1a004e, - 0x004f00ee, - 0x00ffcf19, - 0x0000047e, - 0x0040010e, - 0x000ef61d, -/* 0x04be: ih_no_fifo */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 0xa0fcb0fc, - 
0x80fc90fc, - 0xfc0088fe, - 0x0032f480, -/* 0x04de: hub_barrier_done */ - 0x010f01f8, - 0xbb040e98, - 0xffb204fe, - 0x4094188e, - 0x00008f7e, -/* 0x04f2: ctx_redswitch */ - 0x200f00f8, + 0x01e5f050, + 0x34bd24bd, +/* 0x0386: init_unk_loop */ + 0x657e44bd, + 0xf6b00000, + 0x0e0bf400, + 0xf2bb010f, + 0x054ffd04, +/* 0x039b: init_unk_next */ + 0xb60130b6, + 0xe0b60120, + 0x0126b004, +/* 0x03a7: init_unk_done */ + 0xb5e21bf4, + 0x04b50703, + 0x01008208, + 0x0022cf02, + 0x259534bd, + 0xc0008008, + 0x0005f601, + 0x008004bd, + 0x05f601c1, + 0x9804bd00, + 0x0f98000e, + 0x01207e01, + 0x002fbb00, + 0x98003fbb, + 0x0f98010e, + 0x01207e02, + 0x050e9800, + 0xbb00effd, + 0x3ebb002e, + 0x020e9800, + 0x7e030f98, + 0x98000120, + 0xeffd070e, + 0x002ebb00, + 0xb6003ebb, + 0x00800235, + 0x03f601d3, + 0xb604bd00, + 0x35b60825, + 0x0120b606, + 0xb60130b6, + 0x34b60824, + 0x7e2fb208, + 0xbb000268, + 0x3fbb002f, + 0x01008000, + 0x0003f602, + 0x24bd04bd, + 0x801f29f0, + 0xf6023000, + 0x04bd0002, +/* 0x0448: main */ + 0xf40031f4, + 0x240d0028, + 0x0000377e, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1d, + 0x20bd0602, + 0xb60412fd, + 0x1efd01e4, + 0x0018fe05, + 0x00051b7e, +/* 0x0477: main_not_ctx_xfer */ + 0x94d40ef4, + 0xf5f010ef, + 0x02f87e01, + 0xc70ef400, +/* 0x0484: ih */ + 0x88fe80f9, + 0xf980f901, + 0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e240d1f, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0x0e000004, + 0x1d004001, + 0xbd000ef6, +/* 0x04c1: ih_no_fifo */ + 0x01004004, + 0xbd000af6, + 0xfcf0fc04, + 0xfcd0fce0, + 0xfca0fcb0, + 0xfe80fc90, + 0x80fc0088, + 0xf80032f4, +/* 0x04e1: hub_barrier_done */ + 0x98010f01, + 0xfebb040e, + 0x8effb204, + 0x7e409418, + 0xf800008f, +/* 0x04f5: ctx_redswitch */ + 0x80200f00, + 0xf6018500, + 0x04bd000f, +/* 0x0502: ctx_redswitch_delay */ + 0xe2b6080e, + 0xfd1bf401, + 0x0800f5f1, + 0x0200f5f1, 0x01850080, 0xbd000ff6, -/* 0x04ff: ctx_redswitch_delay */ - 0xb6080e04, - 0x1bf401e2, - 0x00f5f1fd, - 0x00f5f108, - 0x85008002, - 0x000ff601, - 0x00f804bd, -/* 0x0518: ctx_xfer */ - 0x02810080, - 0xbd000ff6, - 0x0711f404, - 0x0004f27e, -/* 0x0528: ctx_xfer_not_load */ - 0x0002167e, - 0xfc8024bd, - 0x02f60247, - 0xf004bd00, - 0x20b6012c, - 0x4afc8003, +/* 0x051b: ctx_xfer */ + 0x8000f804, + 0xf6028100, + 0x04bd000f, + 0x7e0711f4, +/* 0x052b: ctx_xfer_not_load */ + 0x7e0004f5, + 0xbd000216, + 0x47fc8024, 0x0002f602, - 0xacf004bd, - 0x02a5f001, - 0x5000008b, - 0xb6040c98, - 0xbcbb0fc4, - 0x000c9800, - 0x0e010d98, - 0x013d7e00, - 0x01acf000, - 0x5040008b, - 0xb6040c98, - 0xbcbb0fc4, - 0x010c9800, - 0x98020d98, - 0x004e060f, - 0x013d7e08, - 0x01acf000, - 0x8b04a5f0, - 0x98503000, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x8b02a5f0, + 0x98500000, 0xc4b6040c, 0x00bcbb0f, - 0x98020c98, - 0x0f98030d, - 0x02004e08, + 0x98000c98, + 0x000e010d, 0x00013d7e, - 0x00020a7e, - 0xf40601f4, -/* 0x05b2: ctx_xfer_post */ - 0x277e0712, -/* 0x05b6: ctx_xfer_done */ - 0xde7e0002, - 0x00f80004, - 0x00000000, + 0x8b01acf0, + 0x98504000, + 0xc4b6040c, + 0x00bcbb0f, + 0x98010c98, + 0x0f98020d, + 0x08004e06, + 0x00013d7e, + 0xf001acf0, + 0x008b04a5, + 0x0c985030, + 0x0fc4b604, + 0x9800bcbb, + 0x0d98020c, + 0x080f9803, + 0x7e02004e, + 0x7e00013d, + 0xf400020a, + 0x12f40601, +/* 0x05b5: ctx_xfer_post */ + 0x02277e07, +/* 0x05b9: ctx_xfer_done */ + 0x04e17e00, + 0x0000f800, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h index 
51f5c3c6e966..11bf363a6ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h @@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05b04104, + 0x05b34104, 0x400010fe, 0x00f60700, 0x0204bd00, @@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = { 0xc900800f, 0x0002f601, 0x308e04bd, - 0x24bd500c, - 0x44bd34bd, -/* 0x03b0: init_unk_loop */ - 0x0000657e, - 0xf400f6b0, - 0x010f0e0b, - 0xfd04f2bb, - 0x30b6054f, -/* 0x03c5: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40226, -/* 0x03d1: init_unk_done */ - 0x0703b5e2, - 0x820804b5, - 0xcf020100, - 0x34bd0022, - 0x80082595, - 0xf601c000, + 0xe5f0500c, + 0xbd24bd01, +/* 0x03b3: init_unk_loop */ + 0x7e44bd34, + 0xb0000065, + 0x0bf400f6, + 0xbb010f0e, + 0x4ffd04f2, + 0x0130b605, +/* 0x03c8: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf402, +/* 0x03d4: init_unk_done */ + 0xb50703b5, + 0x00820804, + 0x22cf0201, + 0x9534bd00, + 0x00800825, + 0x05f601c0, + 0x8004bd00, + 0xf601c100, 0x04bd0005, - 0x01c10080, - 0xbd0005f6, - 0x000e9804, - 0x7e010f98, - 0xbb000120, - 0x3fbb002f, - 0x010e9800, - 0x7e020f98, - 0x98000120, - 0xeffd050e, - 0x002ebb00, - 0x98003ebb, - 0x0f98020e, - 0x01207e03, - 0x070e9800, - 0xbb00effd, - 0x3ebb002e, - 0x0235b600, - 0x01d30080, - 0xbd0003f6, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb20834b6, - 0x02687e2f, - 0x002fbb00, - 0x0f003fbb, - 0x8effb23f, - 0xf0501d60, - 0x8f7e01e5, - 0x0c0f0000, - 0xa88effb2, - 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0xb23f0f00, - 0x1d608eff, - 0x01e5f050, - 0x00008f7e, - 0xffb2000f, - 0x501d9c8e, - 0x7e01e5f0, - 0x0f00008f, - 0x03147e01, - 0x8effb200, + 0x98000e98, + 0x207e010f, + 0x2fbb0001, + 0x003fbb00, + 0x98010e98, + 0x207e020f, + 0x0e980001, + 0x00effd05, + 0xbb002ebb, + 0x0e98003e, + 0x030f9802, + 0x0001207e, + 0xfd070e98, + 0x2ebb00ef, + 0x003ebb00, + 0x800235b6, + 0xf601d300, + 0x04bd0003, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb20834, + 0x0002687e, + 0xbb002fbb, + 0x3f0f003f, + 0x501d608e, + 0xb201e5f0, + 0x008f7eff, + 0x8e0c0f00, 0xf0501da8, - 0x8f7e01e5, - 0xff0f0000, - 0x988effb2, + 0xffb201e5, + 0x00008f7e, + 0x0003147e, + 0x608e3f0f, 0xe5f0501d, - 0x008f7e01, - 0xb2020f00, - 0x1da88eff, + 0x7effb201, + 0x0f00008f, + 0x1d9c8e00, 0x01e5f050, - 0x00008f7e, + 0x8f7effb2, + 0x010f0000, 0x0003147e, - 0x85050498, - 0x98504000, - 0x64b60406, - 0x0056bb0f, -/* 0x04e0: tpc_strand_init_tpc_loop */ - 0x05705eb8, - 0x00657e00, - 0xbdf6b200, -/* 0x04ed: tpc_strand_init_idx_loop */ - 0x605eb874, - 0x7fb20005, - 0x00008f7e, - 0x05885eb8, - 0x082f9500, - 0x00008f7e, - 0x058c5eb8, - 0x082f9500, + 0x501da88e, + 0xb201e5f0, + 0x008f7eff, + 0x8eff0f00, + 0xf0501d98, + 0xffb201e5, 0x00008f7e, - 0x05905eb8, - 0x00657e00, - 0x06f5b600, - 0xb601f0b6, - 0x2fbb08f4, - 0x003fbb00, - 0xb60170b6, - 0x1bf40162, - 0x0050b7bf, - 0x0142b608, - 0x0fa81bf4, - 0x8effb23f, - 0xf0501d60, - 0x8f7e01e5, - 0x0d0f0000, - 0xa88effb2, + 0xa88e020f, 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0x01008000, - 0x0003f602, - 0x24bd04bd, - 0x801f29f0, - 0xf6023000, - 0x04bd0002, -/* 0x0574: main */ - 0xf40031f4, - 0x240d0028, - 0x0000377e, - 0xb0f401f4, - 0x18f404e4, - 0x0181fe1d, - 0x20bd0602, - 0xb60412fd, - 0x1efd01e4, - 0x0018fe05, - 0x0006477e, -/* 0x05a3: main_not_ctx_xfer */ - 0x94d40ef4, - 0xf5f010ef, - 0x02f87e01, - 0xc70ef400, -/* 0x05b0: ih */ - 0x88fe80f9, - 0xf980f901, - 0xf9a0f990, - 0xf9d0f9b0, - 0xbdf0f9e0, - 0x02004a04, - 0xc400aacf, - 
0x0bf404ab, - 0x4e240d1f, - 0xeecf1a00, - 0x19004f00, - 0x7e00ffcf, - 0x0e000004, - 0x1d004001, - 0xbd000ef6, -/* 0x05ed: ih_no_fifo */ - 0x01004004, - 0xbd000af6, - 0xfcf0fc04, - 0xfcd0fce0, - 0xfca0fcb0, - 0xfe80fc90, - 0x80fc0088, - 0xf80032f4, -/* 0x060d: hub_barrier_done */ - 0x98010f01, - 0xfebb040e, - 0x8effb204, - 0x7e409418, - 0xf800008f, -/* 0x0621: ctx_redswitch */ - 0x80200f00, + 0x7effb201, + 0x7e00008f, + 0x98000314, + 0x00850504, + 0x06985040, + 0x0f64b604, +/* 0x04e3: tpc_strand_init_tpc_loop */ + 0xb80056bb, + 0x0005705e, + 0x0000657e, + 0x74bdf6b2, +/* 0x04f0: tpc_strand_init_idx_loop */ + 0x05605eb8, + 0x7e7fb200, + 0xb800008f, + 0x0005885e, + 0x7e082f95, + 0xb800008f, + 0x00058c5e, + 0x7e082f95, + 0xb800008f, + 0x0005905e, + 0x0000657e, + 0xb606f5b6, + 0xf4b601f0, + 0x002fbb08, + 0xb6003fbb, + 0x62b60170, + 0xbf1bf401, + 0x080050b7, + 0xf40142b6, + 0x3f0fa81b, + 0x501d608e, + 0xb201e5f0, + 0x008f7eff, + 0x8e0d0f00, + 0xf0501da8, + 0xffb201e5, + 0x00008f7e, + 0x0003147e, + 0x02010080, + 0xbd0003f6, + 0xf024bd04, + 0x00801f29, + 0x02f60230, +/* 0x0577: main */ + 0xf404bd00, + 0x28f40031, + 0x7e240d00, + 0xf4000037, + 0xe4b0f401, + 0x1d18f404, + 0x020181fe, + 0xfd20bd06, + 0xe4b60412, + 0x051efd01, + 0x7e0018fe, + 0xf400064a, +/* 0x05a6: main_not_ctx_xfer */ + 0xef94d40e, + 0x01f5f010, + 0x0002f87e, +/* 0x05b3: ih */ + 0xf9c70ef4, + 0x0188fe80, + 0x90f980f9, + 0xb0f9a0f9, + 0xe0f9d0f9, + 0x04bdf0f9, + 0xcf02004a, + 0xabc400aa, + 0x1f0bf404, + 0x004e240d, + 0x00eecf1a, + 0xcf19004f, + 0x047e00ff, + 0x010e0000, + 0xf61d0040, + 0x04bd000e, +/* 0x05f0: ih_no_fifo */ + 0xf6010040, + 0x04bd000a, + 0xe0fcf0fc, + 0xb0fcd0fc, + 0x90fca0fc, + 0x88fe80fc, + 0xf480fc00, + 0x01f80032, +/* 0x0610: hub_barrier_done */ + 0x0e98010f, + 0x04febb04, + 0x188effb2, + 0x8f7e4094, + 0x00f80000, +/* 0x0624: ctx_redswitch */ + 0x0080200f, + 0x0ff60185, + 0x0e04bd00, +/* 0x0631: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0x800200f5, 0xf6018500, 0x04bd000f, -/* 0x062e: ctx_redswitch_delay */ - 0xe2b6080e, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x01850080, - 0xbd000ff6, -/* 0x0647: ctx_xfer */ - 0x8000f804, - 0xf6028100, - 0x04bd000f, - 0xc48effb2, - 0xe5f0501d, - 0x008f7e01, - 0x0711f400, - 0x0006217e, -/* 0x0664: ctx_xfer_not_load */ - 0x0002167e, - 0xfc8024bd, - 0x02f60247, - 0xf004bd00, - 0x20b6012c, - 0x4afc8003, +/* 0x064a: ctx_xfer */ + 0x008000f8, + 0x0ff60281, + 0x8e04bd00, + 0xf0501dc4, + 0xffb201e5, + 0x00008f7e, + 0x7e0711f4, +/* 0x0667: ctx_xfer_not_load */ + 0x7e000624, + 0xbd000216, + 0x47fc8024, 0x0002f602, - 0x0c0f04bd, - 0xa88effb2, - 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0xb23f0f00, - 0x1d608eff, - 0x01e5f050, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x8e0c0f04, + 0xf0501da8, + 0xffb201e5, 0x00008f7e, - 0xffb2000f, - 0x501d9c8e, - 0x7e01e5f0, + 0x0003147e, + 0x608e3f0f, + 0xe5f0501d, + 0x7effb201, 0x0f00008f, - 0x03147e01, - 0x01fcf000, - 0xb203f0b6, - 0x1da88eff, + 0x1d9c8e00, 0x01e5f050, - 0x00008f7e, - 0xf001acf0, - 0x008b02a5, - 0x0c985000, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x7e000e01, - 0xf000013d, - 0x008b01ac, - 0x0c985040, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x7e08004e, - 0xf000013d, + 0x8f7effb2, + 0x010f0000, + 0x0003147e, + 0xb601fcf0, + 0xa88e03f0, + 0xe5f0501d, + 0x7effb201, + 0xf000008f, 0xa5f001ac, - 0x30008b04, + 0x00008b02, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0x4e080f98, - 0x3d7e0200, - 0x0a7e0001, - 0x147e0002, - 0x01f40003, - 0x1a12f406, -/* 0x073c: ctx_xfer_post */ 
- 0x0002277e, - 0xffb20d0f, - 0x501da88e, - 0x7e01e5f0, - 0x7e00008f, -/* 0x0753: ctx_xfer_done */ - 0x7e000314, - 0xf800060d, - 0x00000000, + 0x010d9800, + 0x3d7e000e, + 0xacf00001, + 0x40008b01, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0x4e060f98, + 0x3d7e0800, + 0xacf00001, + 0x04a5f001, + 0x5030008b, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0x004e080f, + 0x013d7e02, + 0x020a7e00, + 0x03147e00, + 0x0601f400, +/* 0x073f: ctx_xfer_post */ + 0x7e1a12f4, + 0x0f000227, + 0x1da88e0d, + 0x01e5f050, + 0x8f7effb2, + 0x147e0000, +/* 0x0756: ctx_xfer_done */ + 0x107e0003, + 0x00f80006, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dda7a7d224c9..9f5dfc85147a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, static int gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) { - struct gf100_gr *gr = (void *)object->engine; + struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); union { struct fermi_a_zbc_color_v0 v0; } *args = data; @@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) static int gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) { - struct gf100_gr *gr = (void *)object->engine; + struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); union { struct fermi_a_zbc_depth_v0 v0; } *args = data; @@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base) gr->ppc_nr[i] = gr->func->ppc_nr; for (j = 0; j < gr->ppc_nr[i]; j++) { u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); + if (mask) + gr->ppc_mask[i] |= (1 << j); gr->ppc_tpc_nr[i][j] = hweight8(mask); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 4611961b1187..02e78b8d93f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -97,6 +97,7 @@ struct gf100_gr { u8 tpc_nr[GPC_MAX]; u8 tpc_total; u8 ppc_nr[GPC_MAX]; + u8 ppc_mask[GPC_MAX]; u8 ppc_tpc_nr[GPC_MAX][4]; struct nvkm_memory *unk4188b4; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 895ba74057d4..1d7dd38292b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -97,7 +97,9 @@ static void * nvkm_instobj_dtor(struct nvkm_memory *memory) { struct nvkm_instobj *iobj = nvkm_instobj(memory); + spin_lock(&iobj->imem->lock); list_del(&iobj->head); + spin_unlock(&iobj->imem->lock); nvkm_memory_del(&iobj->parent); return iobj; } @@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); iobj->parent = memory; iobj->imem = imem; + spin_lock(&iobj->imem->lock); list_add_tail(&iobj->head, &imem->list); + spin_unlock(&iobj->imem->lock); memory = &iobj->memory; } @@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func, { nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); imem->func = func; + spin_lock_init(&imem->lock); INIT_LIST_HEAD(&imem->list); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index b61509e26ec9..b735173a18ff 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c @@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) duty = (uv - bios->base) * div / bios->pwm_range; nvkm_wr32(device, 0x20340, div); - nvkm_wr32(device, 0x20344, 0x8000000 | duty); + nvkm_wr32(device, 0x20344, 0x80000000 | duty); return 0; } diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c index 3f5e1cf138ba..d37ba2cb886e 100644 --- a/drivers/gpu/drm/radeon/rv730_dpm.c +++ b/drivers/gpu/drm/radeon/rv730_dpm.c @@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev) result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); if (result != PPSMC_Result_OK) - DRM_ERROR("Could not force DPM to low\n"); + DRM_DEBUG("Could not force DPM to low\n"); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b9c770745a7a..e830c8935db0 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev) result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); if (result != PPSMC_Result_OK) - DRM_ERROR("Could not force DPM to low.\n"); + DRM_DEBUG("Could not force DPM to low.\n"); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); @@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev) int rv770_set_sw_state(struct radeon_device *rdev) { if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) - return -EINVAL; + DRM_DEBUG("rv770_set_sw_state failed\n"); return 0; } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index f659e605a406..5178645ac42b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) } EXPORT_SYMBOL(nvm_erase_blk); -static void nvm_core_free(struct nvm_dev *dev) -{ - kfree(dev); -} - static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; @@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev) dev->sec_size = grp->csecs; dev->oob_size = grp->sos; dev->sec_per_pg = grp->fpg_sz / grp->csecs; - dev->addr_mode = id->ppat; - dev->addr_format = id->ppaf; + memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); dev->plane_mode = NVM_PLANE_SINGLE; dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; + if (grp->mtype != 0) { + pr_err("nvm: memory type not supported\n"); + return -EINVAL; + } + + if (grp->fmtype != 0 && grp->fmtype != 1) { + pr_err("nvm: flash type not supported\n"); + return -EINVAL; + } + if (grp->mpos & 0x020202) dev->plane_mode = NVM_PLANE_DOUBLE; if (grp->mpos & 0x040404) @@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev) if (dev->mt) dev->mt->unregister_mgr(dev); - - nvm_core_free(dev); } static int nvm_init(struct nvm_dev *dev) { struct nvmm_type *mt; - int ret = 0; + int ret = -EINVAL; if (!dev->q || !dev->ops) - return -EINVAL; + return ret; if (dev->ops->identity(dev->q, &dev->identity)) { pr_err("nvm: device could not be identified\n"); - ret = -EINVAL; goto err; } @@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev) dev->nr_chnls); return 0; err: - nvm_free(dev); pr_err("nvm: failed to initialize nvm\n"); return ret; } @@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name, if (ret) goto err_init; - down_write(&nvm_lock); - list_add(&dev->devices, &nvm_devices); - 
up_write(&nvm_lock); - if (dev->ops->max_phys_sect > 1) { dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, "ppalist"); if (!dev->ppalist_pool) { pr_err("nvm: could not create ppa pool\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_init; } } else if (dev->ops->max_phys_sect > 256) { pr_info("nvm: max sectors supported is 256.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_init; } + down_write(&nvm_lock); + list_add(&dev->devices, &nvm_devices); + up_write(&nvm_lock); + return 0; err_init: kfree(dev); @@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name) return; } - nvm_exit(dev); - down_write(&nvm_lock); list_del(&dev->devices); up_write(&nvm_lock); + + nvm_exit(dev); + kfree(dev); } EXPORT_SYMBOL(nvm_unregister); @@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t) lockdep_assert_held(&nvm_lock); del_gendisk(tdisk); + blk_cleanup_queue(q); + if (tt->exit) tt->exit(tdisk->private_data); - blk_cleanup_queue(q); - put_disk(tdisk); list_del(&t->list); @@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val) if (!dev->mt) return 0; - dev->mt->free_blocks_print(dev); + dev->mt->lun_info_print(dev); return 0; } diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ae1fb2bdc5f4..e20e74ec6b91 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.nr_free_blocks = dev->blks_per_lun; + lun->vlun.nr_inuse_blocks = 0; + lun->vlun.nr_bad_blocks = 0; } return 0; } -static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, +static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, void *private) { struct gen_nvm *gn = private; - struct gen_lun *lun = &gn->luns[lun_id]; + struct nvm_dev *dev = gn->dev; + struct gen_lun *lun; struct nvm_block *blk; int i; - if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) - return 0; + ppa = dev_to_generic_addr(gn->dev, ppa); + lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; + + for (i = 0; i < nr_blocks; i++) { + if (blks[i] == 0) + continue; - i = -1; - while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { blk = &lun->vlun.blocks[i]; if (!blk) { pr_err("gennvm: BB data is out of bounds.\n"); @@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, } list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; } return 0; @@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) list_move_tail(&blk->list, &lun->used_list); blk->type = 1; lun->vlun.nr_free_blocks--; + lun->vlun.nr_inuse_blocks++; } } @@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) block->id = cur_block_id++; /* First block is reserved for device */ - if (unlikely(lun_iter == 0 && blk_iter == 0)) + if (unlikely(lun_iter == 0 && blk_iter == 0)) { + lun->vlun.nr_free_blocks--; continue; + } list_add_tail(&block->list, &lun->free_list); } if (dev->ops->get_bb_tbl) { - ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, - dev->blks_per_lun, gennvm_block_bb, gn); + struct ppa_addr ppa; + + ppa.ppa = 0; + ppa.g.ch = lun->vlun.chnl_id; + ppa.g.lun = lun->vlun.id; + ppa = generic_to_dev_addr(dev, ppa); + + ret = dev->ops->get_bb_tbl(dev->q, ppa, + dev->blks_per_lun, + gennvm_block_bb, gn); if (ret) pr_err("gennvm: could not read BB table\n"); } @@ -199,6 
+216,7 @@ static int gennvm_register(struct nvm_dev *dev) if (!gn) return -ENOMEM; + gn->dev = dev; gn->nr_luns = dev->nr_luns; dev->mp = gn; @@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, blk->type = 1; lun->vlun.nr_free_blocks--; + lun->vlun.nr_inuse_blocks++; spin_unlock(&vlun->lock); out: @@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) case 1: list_move_tail(&blk->list, &lun->free_list); lun->vlun.nr_free_blocks++; + lun->vlun.nr_inuse_blocks--; blk->type = 0; break; case 2: list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; + lun->vlun.nr_inuse_blocks--; break; default: WARN_ON_ONCE(1); pr_err("gennvm: erroneous block type (%lu -> %u)\n", blk->id, blk->type); list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; + lun->vlun.nr_inuse_blocks--; } spin_unlock(&vlun->lock); @@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) if (rqd->nr_pages > 1) { for (i = 0; i < rqd->nr_pages; i++) - rqd->ppa_list[i] = addr_to_generic_mode(dev, + rqd->ppa_list[i] = dev_to_generic_addr(dev, rqd->ppa_list[i]); } else { - rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); + rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); } } @@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) if (rqd->nr_pages > 1) { for (i = 0; i < rqd->nr_pages; i++) - rqd->ppa_list[i] = generic_to_addr_mode(dev, + rqd->ppa_list[i] = generic_to_dev_addr(dev, rqd->ppa_list[i]); } else { - rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); + rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); } } @@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) { int i; - if (!dev->ops->set_bb) + if (!dev->ops->set_bb_tbl) return; - if (dev->ops->set_bb(dev->q, rqd, 1)) + if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) return; gennvm_addr_to_generic_mode(dev, rqd); @@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) return &gn->luns[lunid].vlun; } -static void gennvm_free_blocks_print(struct nvm_dev *dev) +static void gennvm_lun_info_print(struct nvm_dev *dev) { struct gen_nvm *gn = dev->mp; struct gen_lun *lun; unsigned int i; - gennvm_for_each_lun(gn, lun, i) - pr_info("%s: lun%8u\t%u\n", - dev->name, i, lun->vlun.nr_free_blocks); + + gennvm_for_each_lun(gn, lun, i) { + spin_lock(&lun->vlun.lock); + + pr_info("%s: lun%8u\t%u\t%u\t%u\n", + dev->name, i, + lun->vlun.nr_free_blocks, + lun->vlun.nr_inuse_blocks, + lun->vlun.nr_bad_blocks); + + spin_unlock(&lun->vlun.lock); + } } static struct nvmm_type gennvm = { @@ -466,7 +499,7 @@ static struct nvmm_type gennvm = { .erase_blk = gennvm_erase_blk, .get_lun = gennvm_get_lun, - .free_blocks_print = gennvm_free_blocks_print, + .lun_info_print = gennvm_lun_info_print, }; static int __init gennvm_module_init(void) diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index d23bd3501ddc..9c24b5b32dac 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h @@ -35,6 +35,8 @@ struct gen_lun { }; struct gen_nvm { + struct nvm_dev *dev; + int nr_luns; struct gen_lun *luns; }; diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 7ba64c87ba1c..75e59c3a3f96 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) return blk->id * rrpc->dev->pgs_per_blk; } 
+static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = r.ppa; + + l.ppa = 0; + + div_u64_rem(ppa, dev->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, dev->sec_per_pg); + div_u64_rem(ppa, dev->sec_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, dev->pgs_per_blk); + div_u64_rem(ppa, dev->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, dev->blks_per_lun); + div_u64_rem(ppa, dev->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, dev->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) { struct ppa_addr paddr; paddr.ppa = addr; - return __linear_to_generic_addr(dev, paddr); + return linear_to_generic_addr(dev, paddr); } /* requires lun->lock taken */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 917d47e290ae..3147c8d09ea8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -112,7 +112,8 @@ struct iv_tcw_private { * and encrypts / decrypts at the same time. */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, + DM_CRYPT_EXIT_THREAD}; /* * The fields in here must be read only after initialization. @@ -1203,20 +1204,18 @@ continue_locked: if (!RB_EMPTY_ROOT(&cc->write_tree)) goto pop_from_list; + if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { + spin_unlock_irq(&cc->write_thread_wait.lock); + break; + } + __set_current_state(TASK_INTERRUPTIBLE); __add_wait_queue(&cc->write_thread_wait, &wait); spin_unlock_irq(&cc->write_thread_wait.lock); - if (unlikely(kthread_should_stop())) { - set_task_state(current, TASK_RUNNING); - remove_wait_queue(&cc->write_thread_wait, &wait); - break; - } - schedule(); - set_task_state(current, TASK_RUNNING); spin_lock_irq(&cc->write_thread_wait.lock); __remove_wait_queue(&cc->write_thread_wait, &wait); goto continue_locked; @@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti) if (!cc) return; - if (cc->write_thread) + if (cc->write_thread) { + spin_lock_irq(&cc->write_thread_wait.lock); + set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); + wake_up_locked(&cc->write_thread_wait); + spin_unlock_irq(&cc->write_thread_wait.lock); kthread_stop(cc->write_thread); + } if (cc->io_queue) destroy_workqueue(cc->io_queue); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaa6caa46a9f..cfa29f574c2a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct multipath *m = ti->private; - struct pgpath *pgpath; unsigned long flags; int r; - r = 0; - spin_lock_irqsave(&m->lock, flags); if (!m->current_pgpath) __choose_pgpath(m, 0); - pgpath = m->current_pgpath; - - if (pgpath) { - *bdev = pgpath->path.dev->bdev; - *mode = pgpath->path.dev->mode; + if (m->current_pgpath) { + if (!m->queue_io) { + *bdev = m->current_pgpath->path.dev->bdev; + *mode = m->current_pgpath->path.dev->mode; + r = 0; + } else { + /* pg_init has not started or completed */ + r = -ENOTCONN; + } + } else { + /* No path is available */ + if (m->queue_if_no_path) + r = -ENOTCONN; + else + r = -EIO; } - if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) - r = -ENOTCONN; - else if (!*bdev) - r = -EIO; - spin_unlock_irqrestore(&m->lock, flags); - if (r == -ENOTCONN && !fatal_signal_pending(current)) { + if (r 
== -ENOTCONN) { spin_lock_irqsave(&m->lock, flags); if (!m->current_pg) { /* Path status changed, redo selection */ diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 3897b90bd462..63903a5a5d9e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) case PM_WRITE: if (old_mode != new_mode) notify_of_pool_mode_change(pool, "write"); + pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard_bio; @@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct thin_c *tc = ti->private; struct pool *pool = tc->pool; - struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); - if (!pool_limits->discard_granularity) - return; /* pool's discard support is disabled */ + if (!pool->pf.discard_enabled) + return; limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e15f3565892..5df40480228b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -591,7 +591,7 @@ retry: out: dm_put_live_table(md, *srcu_idx); - if (r == -ENOTCONN) { + if (r == -ENOTCONN && !fatal_signal_pending(current)) { msleep(10); goto retry; } @@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_target *tgt; + struct block_device *tgt_bdev = NULL; int srcu_idx, r; - r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); + r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); if (r < 0) return r; @@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, goto out; } - r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg); out: dm_put_live_table(md, srcu_idx); return r; diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e0b7b95813bc..9202d1a468d0 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl { __le16 cdw14[6]; }; -struct nvme_nvm_bbtbl { +struct nvme_nvm_getbbtbl { __u8 opcode; __u8 flags; __u16 command_id; @@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl { __u64 rsvd[2]; __le64 prp1; __le64 prp2; - __le32 prp1_len; - __le32 prp2_len; - __le32 lbb; - __u32 rsvd11[3]; + __le64 spba; + __u32 rsvd4[4]; +}; + +struct nvme_nvm_setbbtbl { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 nlb; + __u8 value; + __u8 rsvd3; + __u32 rsvd4[3]; }; struct nvme_nvm_erase_blk { @@ -129,8 +142,8 @@ struct nvme_nvm_command { struct nvme_nvm_hb_rw hb_rw; struct nvme_nvm_ph_rw ph_rw; struct nvme_nvm_l2ptbl l2p; - struct nvme_nvm_bbtbl get_bb; - struct nvme_nvm_bbtbl set_bb; + struct nvme_nvm_getbbtbl get_bb; + struct nvme_nvm_setbbtbl set_bb; struct nvme_nvm_erase_blk erase; }; }; @@ -142,11 +155,13 @@ struct nvme_nvm_id_group { __u8 num_ch; __u8 num_lun; __u8 num_pln; + __u8 rsvd1; __le16 num_blk; __le16 num_pg; __le16 fpg_sz; __le16 csecs; __le16 sos; + __le16 rsvd2; __le32 trdt; __le32 trdm; __le32 tprt; @@ -154,8 +169,9 @@ struct nvme_nvm_id_group { __le32 tbet; __le32 tbem; __le32 mpos; + __le32 mccap; __le16 cpar; - __u8 reserved[913]; + __u8 reserved[906]; 
} __packed; struct nvme_nvm_addr_format { @@ -178,15 +194,28 @@ struct nvme_nvm_id { __u8 ver_id; __u8 vmnt; __u8 cgrps; - __u8 res[5]; + __u8 res; __le32 cap; __le32 dom; struct nvme_nvm_addr_format ppaf; - __u8 ppat; - __u8 resv[223]; + __u8 resv[228]; struct nvme_nvm_id_group groups[4]; } __packed; +struct nvme_nvm_bb_tbl { + __u8 tblid[4]; + __le16 verid; + __le16 revid; + __le32 rvsd1; + __le32 tblks; + __le32 tfact; + __le32 tgrown; + __le32 tdresv; + __le32 thresv; + __le32 rsvd2[8]; + __u8 blk[0]; +}; + /* * Check we didn't inadvertently grow the command struct */ @@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); - BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); } static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) @@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) dst->tbet = le32_to_cpu(src->tbet); dst->tbem = le32_to_cpu(src->tbem); dst->mpos = le32_to_cpu(src->mpos); + dst->mccap = le32_to_cpu(src->mccap); dst->cpar = le16_to_cpu(src->cpar); } @@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) { struct nvme_ns *ns = q->queuedata; + struct nvme_dev *dev = ns->dev; struct nvme_nvm_id *nvme_nvm_id; struct nvme_nvm_command c = {}; int ret; @@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) if (!nvme_nvm_id) return -ENOMEM; - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, - sizeof(struct nvme_nvm_id)); + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + nvme_nvm_id, sizeof(struct nvme_nvm_id)); if (ret) { ret = -EIO; goto out; @@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) nvm_id->cgrps = nvme_nvm_id->cgrps; nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); + memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, + sizeof(struct nvme_nvm_addr_format)); ret = init_grps(nvm_id, nvme_nvm_id); out: @@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, struct nvme_ns *ns = q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; - u32 len = queue_max_hw_sectors(q) << 9; + u32 len = queue_max_hw_sectors(dev->admin_q) << 9; u32 nlb_pr_rq = len / sizeof(u64); u64 cmd_slba = slba; void *entries; @@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, c.l2p.slba = cpu_to_le64(cmd_slba); c.l2p.nlb = cpu_to_le32(cmd_nlb); - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, - entries, len); + ret = nvme_submit_sync_cmd(dev->admin_q, + (struct nvme_command *)&c, entries, len); if (ret) { dev_err(dev->dev, "L2P table transfer failed (%d)\n", ret); @@ -322,43 +357,82 @@ out: return ret; } -static int nvme_nvm_get_bb_tbl(struct 
request_queue *q, int lunid, - unsigned int nr_blocks, - nvm_bb_update_fn *update_bbtbl, void *priv) +static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, + int nr_blocks, nvm_bb_update_fn *update_bbtbl, + void *priv) { struct nvme_ns *ns = q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; - void *bb_bitmap; - u16 bb_bitmap_size; + struct nvme_nvm_bb_tbl *bb_tbl; + int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; int ret = 0; c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; c.get_bb.nsid = cpu_to_le32(ns->ns_id); - c.get_bb.lbb = cpu_to_le32(lunid); - bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; - bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); - if (!bb_bitmap) - return -ENOMEM; + c.get_bb.spba = cpu_to_le64(ppa.ppa); - bitmap_zero(bb_bitmap, nr_blocks); + bb_tbl = kzalloc(tblsz, GFP_KERNEL); + if (!bb_tbl) + return -ENOMEM; - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, - bb_bitmap_size); + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + bb_tbl, tblsz); if (ret) { dev_err(dev->dev, "get bad block table failed (%d)\n", ret); ret = -EIO; goto out; } - ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); + if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' || + bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { + dev_err(dev->dev, "bbt format mismatch\n"); + ret = -EINVAL; + goto out; + } + + if (le16_to_cpu(bb_tbl->verid) != 1) { + ret = -EINVAL; + dev_err(dev->dev, "bbt version not supported\n"); + goto out; + } + + if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { + ret = -EINVAL; + dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)", + le32_to_cpu(bb_tbl->tblks), nr_blocks); + goto out; + } + + ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); if (ret) { ret = -EINTR; goto out; } out: - kfree(bb_bitmap); + kfree(bb_tbl); + return ret; +} + +static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, + int type) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_dev *dev = ns->dev; + struct nvme_nvm_command c = {}; + int ret = 0; + + c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; + c.set_bb.nsid = cpu_to_le32(ns->ns_id); + c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); + c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); + c.set_bb.value = type; + + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + NULL, 0); + if (ret) + dev_err(dev->dev, "set bad block table failed (%d)\n", ret); return ret; } @@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .get_l2p_tbl = nvme_nvm_get_l2p_tbl, .get_bb_tbl = nvme_nvm_get_bb_tbl, + .set_bb_tbl = nvme_nvm_set_bb_tbl, .submit_io = nvme_nvm_submit_io, .erase_block = nvme_nvm_erase_block, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8187df204695..f3b53af789ef 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, goto retry_cmd; } if (blk_integrity_rq(req)) { - if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) + if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } sg_init_table(iod->meta_sg, 1); if (blk_rq_map_integrity_sg( - req->q, req->bio, iod->meta_sg) != 1) + req->q, req->bio, iod->meta_sg) != 1) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } if (rq_data_dir(req)) nvme_dif_remap(req, nvme_dif_prep); - if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 
1, dma_dir)) + if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } } } @@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) return; - writel(head, nvmeq->q_db + nvmeq->dev->db_stride); + if (likely(nvmeq->cq_vector >= 0)) + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); nvmeq->cq_head = head; nvmeq->cq_phase = phase; @@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) u32 aqa; u64 cap = lo_hi_readq(&dev->bar->cap); struct nvme_queue *nvmeq; - unsigned page_shift = PAGE_SHIFT; + /* + * default to a 4K page size, with the intention to update this + * path in the future to accomodate architectures with differing + * kernel and IO page sizes. + */ + unsigned page_shift = 12; unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; - unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; if (page_shift < dev_page_min) { dev_err(dev->dev, @@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) 1 << page_shift); return -ENODEV; } - if (page_shift > dev_page_max) { - dev_info(dev->dev, - "Device maximum page size (%u) smaller than " - "host (%u); enabling work-around\n", - 1 << dev_page_max, 1 << page_shift); - page_shift = dev_page_max; - } dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? NVME_CAP_NSSRC(cap) : 0; @@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) if (dev->max_hw_sectors) { blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); blk_queue_max_segments(ns->queue, - ((dev->max_hw_sectors << 9) / dev->page_size) + 1); + (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); } if (dev->stripe_size) blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); @@ -2787,6 +2794,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq) { struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; nvme_put_dq(dq); + + spin_lock_irq(&nvmeq->q_lock); + nvme_process_cq(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); } static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 540f077c37ea..02a7452bdf23 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp) ret, pp->io); continue; } - pp->io_base = pp->io->start; break; case IORESOURCE_MEM: pp->mem = win->res; diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 35457ecd8e70..163671a4f798 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = { .link_up = hisi_pcie_link_up, }; -static int __init hisi_add_pcie_port(struct pcie_port *pp, +static int hisi_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { int ret; @@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp, return 0; } -static int __init hisi_pcie_probe(struct platform_device *pdev) +static int hisi_pcie_probe(struct platform_device *pdev) { struct hisi_pcie *hisi_pcie; struct pcie_port *pp; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 4446fcb5effd..d7ffd66814bb 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev) pci_dev->state_saved = false; 
pci_dev->no_d3cold = false; error = pm->runtime_suspend(dev); - suspend_report_result(pm->runtime_suspend, error); - if (error) + if (error) { + /* + * -EBUSY and -EAGAIN is used to request the runtime PM core + * to schedule a new suspend, so log the event only with debug + * log level. + */ + if (error == -EBUSY || error == -EAGAIN) + dev_dbg(dev, "can't suspend now (%pf returned %d)\n", + pm->runtime_suspend, error); + else + dev_err(dev, "can't suspend (%pf returned %d)\n", + pm->runtime_suspend, error); + return error; + } if (!pci_dev->d3cold_allowed) pci_dev->no_d3cold = true; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 92618686604c..eead54cd01b2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev, if (ret) return ret; - if (node >= MAX_NUMNODES || !node_online(node)) + if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES) + return -EINVAL; + + if (node != NUMA_NO_NODE && !node_online(node)) return -EINVAL; add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fd2f03fa53f3..d390fc1475ec 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) } #endif -struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); - #endif /* DRIVERS_PCI_H */ diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 188006c55ce0..aa705bb4748c 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -15,9 +15,6 @@ #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> -#include <linux/pm_wakeirq.h> #include <linux/rtc/ds1307.h> #include <linux/rtc.h> #include <linux/slab.h> @@ -117,7 +114,6 @@ struct ds1307 { #define HAS_ALARM 1 /* bit 1 == irq claimed */ struct i2c_client *client; struct rtc_device *rtc; - int wakeirq; s32 (*read_block_data)(const struct i2c_client *client, u8 command, u8 length, u8 *values); s32 (*write_block_data)(const struct i2c_client *client, u8 command, @@ -1138,7 +1134,10 @@ read_rtc: bin2bcd(tmp)); } - device_set_wakeup_capable(&client->dev, want_irq); + if (want_irq) { + device_set_wakeup_capable(&client->dev, true); + set_bit(HAS_ALARM, &ds1307->flags); + } ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, rtc_ops, THIS_MODULE); if (IS_ERR(ds1307->rtc)) { @@ -1146,43 +1145,19 @@ read_rtc: } if (want_irq) { - struct device_node *node = client->dev.of_node; - err = devm_request_threaded_irq(&client->dev, client->irq, NULL, irq_handler, IRQF_SHARED | IRQF_ONESHOT, ds1307->rtc->name, client); if (err) { client->irq = 0; + device_set_wakeup_capable(&client->dev, false); + clear_bit(HAS_ALARM, &ds1307->flags); dev_err(&client->dev, "unable to request IRQ!\n"); - goto no_irq; - } - - set_bit(HAS_ALARM, &ds1307->flags); - dev_dbg(&client->dev, "got IRQ %d\n", client->irq); - - /* Currently supported by OF code only! 
*/ - if (!node) - goto no_irq; - - err = of_irq_get(node, 1); - if (err <= 0) { - if (err == -EPROBE_DEFER) - goto exit; - goto no_irq; - } - ds1307->wakeirq = err; - - err = dev_pm_set_dedicated_wake_irq(&client->dev, - ds1307->wakeirq); - if (err) { - dev_err(&client->dev, "unable to setup wakeIRQ %d!\n", - err); - goto exit; - } + } else + dev_dbg(&client->dev, "got IRQ %d\n", client->irq); } -no_irq: if (chip->nvram_size) { ds1307->nvram = devm_kzalloc(&client->dev, @@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client) { struct ds1307 *ds1307 = i2c_get_clientdata(client); - if (ds1307->wakeirq) - dev_pm_clear_wake_irq(&client->dev); - if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3ba2e9564b9a..81af294f15a7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); } -CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable); +CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 9d5068248aa0..0a4ea809a61b 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -23,6 +23,7 @@ config MTK_PMIC_WRAP config MTK_SCPSYS bool "MediaTek SCPSYS Support" depends on ARCH_MEDIATEK || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK select REGMAP select MTK_INFRACFG select PM_GENERIC_DOMAINS if PM diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index f3a0b6a4b54e..8c03a80b482d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev) block++; if (!block->size) - return 0; + continue; dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", block->phys, block->virt, block->size); @@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev, for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { if (knav_acc_firmwares[i]) { - ret = request_firmware(&fw, - knav_acc_firmwares[i], - kdev->dev); + ret = request_firmware_direct(&fw, + knav_acc_firmwares[i], + kdev->dev); if (!ret) { found = true; break; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 342a07c58d89..72204fbf2bb1 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4074,6 +4074,17 @@ reject: return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } +static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) +{ + bool ret; + + spin_lock_bh(&conn->state_lock); + ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); + spin_unlock_bh(&conn->state_lock); + + return ret; +} + int iscsi_target_rx_thread(void *arg) { int ret, rc; @@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg) * incoming iscsi/tcp socket I/O, and/or failing the connection. 
*/ rc = wait_for_completion_interruptible(&conn->rx_login_comp); - if (rc < 0) + if (rc < 0 || iscsi_target_check_conn_state(conn)) return 0; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 5c964c09c89f..9fc9117d0f22 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -388,6 +388,7 @@ err: if (login->login_complete) { if (conn->rx_thread && conn->rx_thread_active) { send_sig(SIGINT, conn->rx_thread, 1); + complete(&conn->rx_login_comp); kthread_stop(conn->rx_thread); } if (conn->tx_thread && conn->tx_thread_active) { diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 51d1734d5390..2cbea2af7cd0 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) if (!pl) { pr_err("Unable to allocate memory for" " struct iscsi_param_list.\n"); - return -1 ; + return -ENOMEM; } INIT_LIST_HEAD(&pl->param_list); INIT_LIST_HEAD(&pl->extra_response_list); @@ -578,7 +578,7 @@ int iscsi_copy_param_list( param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); if (!param_list) { pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&param_list->param_list); INIT_LIST_HEAD(&param_list->extra_response_list); @@ -629,7 +629,7 @@ int iscsi_copy_param_list( err_out: iscsi_release_param_list(param_list); - return -1; + return -ENOMEM; } static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) @@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response( if (!extra_response) { pr_err("Unable to allocate memory for" " struct iscsi_extra_response.\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&extra_response->er_list); @@ -1370,7 +1370,7 @@ int iscsi_decode_text_input( tmpbuf = kzalloc(length + 1, GFP_KERNEL); if (!tmpbuf) { pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); - return -1; + return -ENOMEM; } memcpy(tmpbuf, textbuf, length); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 0b4b2a67d9f9..98698d875742 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o return 0; } -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, + int *post_ret) { unsigned char *buf, *addr; struct scatterlist *sg; @@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd) cmd->data_direction); } -static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + int *post_ret) { struct se_device *dev = cmd->se_dev; @@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) * sent to the backend driver.
*/ spin_lock_irq(&cmd->t_state_lock); - if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) + if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; + *post_ret = 1; + } spin_unlock_irq(&cmd->t_state_lock); /* @@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) return TCM_NO_SENSE; } -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, + int *post_ret) { struct se_device *dev = cmd->se_dev; struct scatterlist *write_sg = NULL, *sg; @@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes if (block_size < PAGE_SIZE) { sg_set_page(&write_sg[i], m.page, block_size, - block_size); + m.piter.sg->offset + block_size); } else { sg_miter_next(&m); sg_set_page(&write_sg[i], m.page, block_size, - 0); + m.piter.sg->offset); } len -= block_size; i++; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 273c72b2b83d..81a6b3e07687 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) char str[sizeof(dev->t10_wwn.model)+1]; /* scsiLuProductId */ - for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) + for (i = 0; i < sizeof(dev->t10_wwn.model); i++) str[i] = ISPRINT(dev->t10_wwn.model[i]) ? dev->t10_wwn.model[i] : ' '; str[i] = '\0'; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 5b2820312310..28fb3016370f 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -130,6 +130,9 @@ void core_tmr_abort_task( if (tmr->ref_task_tag != ref_tag) continue; + if (!kref_get_unless_zero(&se_cmd->cmd_kref)) + continue; + printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); @@ -139,13 +142,15 @@ void core_tmr_abort_task( " skipping\n", ref_tag); spin_unlock(&se_cmd->t_state_lock); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + target_put_sess_cmd(se_cmd); + goto out; } se_cmd->transport_state |= CMD_T_ABORTED; spin_unlock(&se_cmd->t_state_lock); list_del_init(&se_cmd->se_cmd_list); - kref_get(&se_cmd->cmd_kref); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); cancel_work_sync(&se_cmd->work); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5bacc7b5ed6d..4fdcee2006d1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) void transport_generic_request_failure(struct se_cmd *cmd, sense_reason_t sense_reason) { - int ret = 0; + int ret = 0, post_ret = 0; pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); @@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, */ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && cmd->transport_complete_callback) - cmd->transport_complete_callback(cmd, false); + cmd->transport_complete_callback(cmd, false, &post_ret); switch (sense_reason) { case TCM_NON_EXISTENT_LUN: @@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work) */ if (cmd->transport_complete_callback) { sense_reason_t rc; + bool caw = (cmd->se_cmd_flags & 
SCF_COMPARE_AND_WRITE); + bool zero_dl = !(cmd->data_length); + int post_ret = 0; - rc = cmd->transport_complete_callback(cmd, true); - if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { - if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && - !cmd->data_length) + rc = cmd->transport_complete_callback(cmd, true, &post_ret); + if (!rc && !post_ret) { + if (caw && zero_dl) goto queue_rsp; return; @@ -2507,23 +2509,24 @@ out: EXPORT_SYMBOL(target_get_sess_cmd); static void target_release_cmd_kref(struct kref *kref) - __releases(&se_cmd->se_sess->sess_cmd_lock) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; + unsigned long flags; + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) se_cmd->se_tfo->release_cmd(se_cmd); return 1; } - return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, - &se_sess->sess_cmd_lock); + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 937cebf76633..5e6d6cb348fc 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) return 0; - if (!time_after(cmd->deadline, jiffies)) + if (!time_after(jiffies, cmd->deadline)) return 0; set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); @@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd) static const struct target_backend_ops tcmu_ops = { .name = "user", - .inquiry_prod = "USER", - .inquiry_rev = TCMU_VERSION, .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = tcmu_attach_hba, diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c463c89b90ef..8cc4ac64a91c 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -382,7 +382,7 @@ endmenu config QCOM_SPMI_TEMP_ALARM tristate "Qualcomm SPMI PMIC Temperature Alarm" - depends on OF && (SPMI || COMPILE_TEST) && IIO + depends on OF && SPMI && IIO select REGMAP_SPMI help This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c8fe3cac2e0e..c5547bd711db 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -55,6 +55,7 @@ #define TEMPSENSE2_PANIC_VALUE_SHIFT 16 #define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 +#define OCOTP_MEM0 0x0480 #define OCOTP_ANA1 0x04e0 /* The driver supports 1 passive trip point and 1 critical trip point */ @@ -64,12 +65,6 @@ enum imx_thermal_trip { IMX_TRIP_NUM, }; -/* - * It defines the temperature in millicelsius for passive trip point - * that will trigger cooling action when crossed. 
- */ -#define IMX_TEMP_PASSIVE 85000 - #define IMX_POLLING_DELAY 2000 /* millisecond */ #define IMX_PASSIVE_DELAY 1000 @@ -100,12 +95,14 @@ struct imx_thermal_data { u32 c1, c2; /* See formula in imx_get_sensor_data() */ int temp_passive; int temp_critical; + int temp_max; int alarm_temp; int last_temp; bool irq_enabled; int irq; struct clk *thermal_clk; const struct thermal_soc_data *socdata; + const char *temp_grade; }; static void imx_set_panic_temp(struct imx_thermal_data *data, @@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, { struct imx_thermal_data *data = tz->devdata; + /* do not allow changing critical threshold */ if (trip == IMX_TRIP_CRITICAL) return -EPERM; - if (temp < 0 || temp > IMX_TEMP_PASSIVE) + /* do not allow passive to be set higher than critical */ + if (temp < 0 || temp > data->temp_critical) return -EINVAL; data->temp_passive = temp; @@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev) data->c1 = temp64; data->c2 = n1 * data->c1 + 1000 * t1; - /* - * Set the default passive cooling trip point, - * can be changed from userspace. - */ - data->temp_passive = IMX_TEMP_PASSIVE; + /* use OTP for thermal grade */ + ret = regmap_read(map, OCOTP_MEM0, &val); + if (ret) { + dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret); + return ret; + } + + /* The maximum die temp is specified by the Temperature Grade */ + switch ((val >> 6) & 0x3) { + case 0: /* Commercial (0 to 95C) */ + data->temp_grade = "Commercial"; + data->temp_max = 95000; + break; + case 1: /* Extended Commercial (-20 to 105C) */ + data->temp_grade = "Extended Commercial"; + data->temp_max = 105000; + break; + case 2: /* Industrial (-40 to 105C) */ + data->temp_grade = "Industrial"; + data->temp_max = 105000; + break; + case 3: /* Automotive (-40 to 125C) */ + data->temp_grade = "Automotive"; + data->temp_max = 125000; + break; + } /* - * The maximum die temperature set to 20 C higher than - * IMX_TEMP_PASSIVE. 
+ * Set the critical trip point at 5C under max + * Set the passive trip point at 10C under max (can change via sysfs) */ - data->temp_critical = 1000 * 20 + data->temp_passive; + data->temp_critical = data->temp_max - (1000 * 5); + data->temp_passive = data->temp_max - (1000 * 10); return 0; } @@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev) return ret; } + dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC" + " critical:%dC passive:%dC\n", data->temp_grade, + data->temp_max / 1000, data->temp_critical / 1000, + data->temp_passive / 1000); + /* Enable measurements at ~ 10 Hz */ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 42b7d4253b94..be4eedcb839a 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void) np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { - pr_err("unable to find thermal zones\n"); + pr_debug("unable to find thermal zones\n"); return; } diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index f0fbea386869..1246aa6fcab0 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, /** * pid_controller() - PID controller * @tz: thermal zone we are operating in - * @current_temp: the current temperature in millicelsius * @control_temp: the target temperature in millicelsius * @max_allocatable_power: maximum allocatable power for this thermal zone * @@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, * Return: The power budget for the next period. 
*/ static u32 pid_controller(struct thermal_zone_device *tz, - int current_temp, int control_temp, u32 max_allocatable_power) { @@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz, true); } - err = control_temp - current_temp; + err = control_temp - tz->temperature; err = int_to_frac(err); /* Calculate the proportional term */ @@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, } static int allocate_power(struct thermal_zone_device *tz, - int current_temp, int control_temp) { struct thermal_instance *instance; @@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz, i++; } - power_range = pid_controller(tz, current_temp, control_temp, - max_allocatable_power); + power_range = pid_controller(tz, control_temp, max_allocatable_power); divvy_up_power(weighted_req_power, max_power, num_actors, total_weighted_req_power, power_range, granted_power, @@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz, trace_thermal_power_allocator(tz, req_power, total_req_power, granted_power, total_granted_power, num_actors, power_range, - max_allocatable_power, current_temp, - control_temp - current_temp); + max_allocatable_power, tz->temperature, + control_temp - tz->temperature); kfree(req_power); unlock: @@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz) static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) { int ret; - int switch_on_temp, control_temp, current_temp; + int switch_on_temp, control_temp; struct power_allocator_params *params = tz->governor_data; /* @@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) if (trip != params->trip_max_desired_temperature) return 0; - ret = thermal_zone_get_temp(tz, &current_temp); - if (ret) { - dev_warn(&tz->device, "Failed to get temperature: %d\n", ret); - return ret; - } - ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, &switch_on_temp); - if (!ret && (current_temp < switch_on_temp)) { + if (!ret && (tz->temperature < switch_on_temp)) { tz->passive = 0; reset_pid_controller(params); allow_maximum_power(tz); @@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) return ret; } - return allocate_power(tz, current_temp, control_temp); + return allocate_power(tz, control_temp); } static struct thermal_governor thermal_gov_power_allocator = { diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 5d4ae7d705e0..13d01edc7a04 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) /* * platform functions */ +static int rcar_thermal_remove(struct platform_device *pdev) +{ + struct rcar_thermal_common *common = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + struct rcar_thermal_priv *priv; + + rcar_thermal_for_each_priv(priv, common) { + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + thermal_zone_device_unregister(priv->zone); + } + + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + static int rcar_thermal_probe(struct platform_device *pdev) { struct rcar_thermal_common *common; @@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (!common) return -ENOMEM; + platform_set_drvdata(pdev, common); + INIT_LIST_HEAD(&common->head); spin_lock_init(&common->lock); common->dev = dev; @@ -454,43 +474,16 @@
static int rcar_thermal_probe(struct platform_device *pdev) rcar_thermal_common_write(common, ENR, enr_bits); } - platform_set_drvdata(pdev, common); - dev_info(dev, "%d sensor probed\n", i); return 0; error_unregister: - rcar_thermal_for_each_priv(priv, common) { - if (rcar_has_irq_support(priv)) - rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); - } - - pm_runtime_put(dev); - pm_runtime_disable(dev); + rcar_thermal_remove(pdev); return ret; } -static int rcar_thermal_remove(struct platform_device *pdev) -{ - struct rcar_thermal_common *common = platform_get_drvdata(pdev); - struct device *dev = &pdev->dev; - struct rcar_thermal_priv *priv; - - rcar_thermal_for_each_priv(priv, common) { - if (rcar_has_irq_support(priv)) - rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); - } - - pm_runtime_put(dev); - pm_runtime_disable(dev); - - return 0; -} - static const struct of_device_id rcar_thermal_dt_ids[] = { { .compatible = "renesas,rcar-thermal", }, {}, diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 9787e8aa509f..e845841ab036 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1,6 +1,9 @@ /* * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * Caesar Wang <wxt@rock-chips.com> + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -45,17 +48,50 @@ enum tshut_polarity { }; /** - * The system has three Temperature Sensors. channel 0 is reserved, - * channel 1 is for CPU, and channel 2 is for GPU. + * The system has two Temperature Sensors. + * sensor0 is for CPU, and sensor1 is for GPU. */ enum sensor_id { - SENSOR_CPU = 1, + SENSOR_CPU = 0, SENSOR_GPU, }; +/** +* The conversion table has the adc value and temperature. +* ADC_DECREMENT is the adc value decremnet.(e.g. v2_code_table) +* ADC_INCREMNET is the adc value incremnet.(e.g. v3_code_table) +*/ +enum adc_sort_mode { + ADC_DECREMENT = 0, + ADC_INCREMENT, +}; + +/** + * The max sensors is two in rockchip SoCs. + * Two sensors: CPU and GPU sensor. 
+ */ +#define SOC_MAX_SENSORS 2 + +struct chip_tsadc_table { + const struct tsadc_table *id; + + /* the array table size*/ + unsigned int length; + + /* that analogic mask data */ + u32 data_mask; + + /* the sort mode is adc value that increment or decrement in table */ + enum adc_sort_mode mode; +}; + struct rockchip_tsadc_chip { + /* The sensor id of chip correspond to the ADC channel */ + int chn_id[SOC_MAX_SENSORS]; + int chn_num; + /* The hardware-controlled tshut property */ - long tshut_temp; + int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; @@ -65,37 +101,40 @@ struct rockchip_tsadc_chip { void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ - int (*get_temp)(int chn, void __iomem *reg, int *temp); - void (*set_tshut_temp)(int chn, void __iomem *reg, long temp); + int (*get_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int *temp); + void (*set_tshut_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); + + /* Per-table methods */ + struct chip_tsadc_table table; }; struct rockchip_thermal_sensor { struct rockchip_thermal_data *thermal; struct thermal_zone_device *tzd; - enum sensor_id id; + int id; }; -#define NUM_SENSORS 2 /* Ignore unused sensor 0 */ - struct rockchip_thermal_data { const struct rockchip_tsadc_chip *chip; struct platform_device *pdev; struct reset_control *reset; - struct rockchip_thermal_sensor sensors[NUM_SENSORS]; + struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS]; struct clk *clk; struct clk *pclk; void __iomem *regs; - long tshut_temp; + int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; }; -/* TSADC V2 Sensor info define: */ +/* TSADC Sensor info define: */ #define TSADCV2_AUTO_CON 0x04 #define TSADCV2_INT_EN 0x08 #define TSADCV2_INT_PD 0x0c @@ -117,6 +156,8 @@ struct rockchip_thermal_data { #define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8) #define TSADCV2_DATA_MASK 0xfff +#define TSADCV3_DATA_MASK 0x3ff + #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 #define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ @@ -124,7 +165,7 @@ struct rockchip_thermal_data { struct tsadc_table { u32 code; - long temp; + int temp; }; static const struct tsadc_table v2_code_table[] = { @@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = { {3421, 125000}, }; -static u32 rk_tsadcv2_temp_to_code(long temp) +static const struct tsadc_table v3_code_table[] = { + {0, -40000}, + {106, -40000}, + {108, -35000}, + {110, -30000}, + {112, -25000}, + {114, -20000}, + {116, -15000}, + {118, -10000}, + {120, -5000}, + {122, 0}, + {124, 5000}, + {126, 10000}, + {128, 15000}, + {130, 20000}, + {132, 25000}, + {134, 30000}, + {136, 35000}, + {138, 40000}, + {140, 45000}, + {142, 50000}, + {144, 55000}, + {146, 60000}, + {148, 65000}, + {150, 70000}, + {152, 75000}, + {154, 80000}, + {156, 85000}, + {158, 90000}, + {160, 95000}, + {162, 100000}, + {163, 105000}, + {165, 110000}, + {167, 115000}, + {169, 120000}, + {171, 125000}, + {TSADCV3_DATA_MASK, 125000}, +}; + +static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, + int temp) { int high, low, mid; low = 0; - high = ARRAY_SIZE(v2_code_table) - 1; + high = table.length - 1; mid = (high + low) / 2; - if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp) + if (temp < table.id[low].temp || temp > table.id[high].temp) return 0; while (low <= high) { - if (temp == 
v2_code_table[mid].temp) - return v2_code_table[mid].code; - else if (temp < v2_code_table[mid].temp) + if (temp == table.id[mid].temp) + return table.id[mid].code; + else if (temp < table.id[mid].temp) high = mid - 1; else low = mid + 1; @@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp) return 0; } -static int rk_tsadcv2_code_to_temp(u32 code, int *temp) +static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, + int *temp) { unsigned int low = 1; - unsigned int high = ARRAY_SIZE(v2_code_table) - 1; + unsigned int high = table.length - 1; unsigned int mid = (low + high) / 2; unsigned int num; unsigned long denom; - BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2); - - code &= TSADCV2_DATA_MASK; - if (code < v2_code_table[high].code) - return -EAGAIN; /* Incorrect reading */ - - while (low <= high) { - if (code >= v2_code_table[mid].code && - code < v2_code_table[mid - 1].code) - break; - else if (code < v2_code_table[mid].code) - low = mid + 1; - else - high = mid - 1; - mid = (low + high) / 2; + WARN_ON(table.length < 2); + + switch (table.mode) { + case ADC_DECREMENT: + code &= table.data_mask; + if (code < table.id[high].code) + return -EAGAIN; /* Incorrect reading */ + + while (low <= high) { + if (code >= table.id[mid].code && + code < table.id[mid - 1].code) + break; + else if (code < table.id[mid].code) + low = mid + 1; + else + high = mid - 1; + + mid = (low + high) / 2; + } + break; + case ADC_INCREMENT: + code &= table.data_mask; + if (code < table.id[low].code) + return -EAGAIN; /* Incorrect reading */ + + while (low <= high) { + if (code >= table.id[mid - 1].code && + code < table.id[mid].code) + break; + else if (code > table.id[mid].code) + low = mid + 1; + else + high = mid - 1; + + mid = (low + high) / 2; + } + break; + default: + pr_err("Invalid the conversion table\n"); } /* @@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp) * temperature between 2 table entries is linear and interpolate * to produce less granular result. */ - num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp; - num *= v2_code_table[mid - 1].code - code; - denom = v2_code_table[mid - 1].code - v2_code_table[mid].code; - *temp = v2_code_table[mid - 1].temp + (num / denom); + num = table.id[mid].temp - v2_code_table[mid - 1].temp; + num *= abs(table.id[mid - 1].code - code); + denom = abs(table.id[mid - 1].code - table.id[mid].code); + *temp = table.id[mid - 1].temp + (num / denom); return 0; } /** - * rk_tsadcv2_initialize - initialize TASDC Controller - * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between - * every two accessing of TSADC in normal operation. - * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between - * every two accessing of TSADC after the temperature is higher - * than COM_SHUT or COM_INT. - * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE, - * if the temperature is higher than COMP_INT or COMP_SHUT for - * "debounce" times, TSADC controller will generate interrupt or TSHUT. + * rk_tsadcv2_initialize - initialize TASDC Controller. + * + * (1) Set TSADC_V2_AUTO_PERIOD: + * Configure the interleave between every two accessing of + * TSADC in normal operation. + * + * (2) Set TSADCV2_AUTO_PERIOD_HT: + * Configure the interleave between every two accessing of + * TSADC after the temperature is higher than COM_SHUT or COM_INT. 
+ * + * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE: + * If the temperature is higher than COMP_INT or COMP_SHUT for + * "debounce" times, TSADC controller will generate interrupt or TSHUT. */ static void rk_tsadcv2_initialize(void __iomem *regs, enum tshut_polarity tshut_polarity) @@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable) writel_relaxed(val, regs + TSADCV2_AUTO_CON); } -static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp) +static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, + int chn, void __iomem *regs, int *temp) { u32 val; val = readl_relaxed(regs + TSADCV2_DATA(chn)); - return rk_tsadcv2_code_to_temp(val, temp); + return rk_tsadcv2_code_to_temp(table, val, temp); } -static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp) +static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, + int chn, void __iomem *regs, int temp) { u32 tshut_value, val; - tshut_value = rk_tsadcv2_temp_to_code(temp); + tshut_value = rk_tsadcv2_temp_to_code(table, temp); writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); /* TSHUT will be valid */ @@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, } static const struct rockchip_tsadc_chip rk3288_tsadc_data = { + .chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */ + .chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */ + .chn_num = 2, /* two channels for tsadc */ + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ .tshut_temp = 95000, @@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = { .get_temp = rk_tsadcv2_get_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = v2_code_table, + .length = ARRAY_SIZE(v2_code_table), + .data_mask = TSADCV2_DATA_MASK, + .mode = ADC_DECREMENT, + }, +}; + +static const struct rockchip_tsadc_chip rk3368_tsadc_data = { + .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ + .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ + .chn_num = 2, /* two channels for tsadc */ + + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ + .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ + .tshut_temp = 95000, + + .initialize = rk_tsadcv2_initialize, + .irq_ack = rk_tsadcv2_irq_ack, + .control = rk_tsadcv2_control, + .get_temp = rk_tsadcv2_get_temp, + .set_tshut_temp = rk_tsadcv2_tshut_temp, + .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = v3_code_table, + .length = ARRAY_SIZE(v3_code_table), + .data_mask = TSADCV3_DATA_MASK, + .mode = ADC_INCREMENT, + }, }; static const struct of_device_id of_rockchip_thermal_match[] = { @@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = { .compatible = "rockchip,rk3288-tsadc", .data = (void *)&rk3288_tsadc_data, }, + { + .compatible = "rockchip,rk3368-tsadc", + .data = (void *)&rk3368_tsadc_data, + }, { /* end */ }, }; MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match); @@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev) thermal->chip->irq_ack(thermal->regs); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) thermal_zone_device_update(thermal->sensors[i].tzd); return IRQ_HANDLED; @@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) const struct rockchip_tsadc_chip 
*tsadc = sensor->thermal->chip; int retval; - retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp); + retval = tsadc->get_temp(tsadc->table, + sensor->id, thermal->regs, out_temp); dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", sensor->id, *out_temp, retval); @@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev, if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) { dev_warn(dev, - "Missing tshut temp property, using default %ld\n", + "Missing tshut temp property, using default %d\n", thermal->chip->tshut_temp); thermal->tshut_temp = thermal->chip->tshut_temp; } else { @@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev, } if (thermal->tshut_temp > INT_MAX) { - dev_err(dev, "Invalid tshut temperature specified: %ld\n", + dev_err(dev, "Invalid tshut temperature specified: %d\n", thermal->tshut_temp); return -ERANGE; } @@ -442,13 +594,14 @@ static int rockchip_thermal_register_sensor(struct platform_device *pdev, struct rockchip_thermal_data *thermal, struct rockchip_thermal_sensor *sensor, - enum sensor_id id) + int id) { const struct rockchip_tsadc_chip *tsadc = thermal->chip; int error; tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp); + tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, + thermal->tshut_temp); sensor->thermal = thermal; sensor->id = id; @@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) const struct of_device_id *match; struct resource *res; int irq; - int i; + int i, j; int error; match = of_match_node(of_rockchip_thermal_match, np); @@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev) thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); - error = rockchip_thermal_register_sensor(pdev, thermal, - &thermal->sensors[0], - SENSOR_CPU); - if (error) { - dev_err(&pdev->dev, - "failed to register CPU thermal sensor: %d\n", error); - goto err_disable_pclk; - } - - error = rockchip_thermal_register_sensor(pdev, thermal, - &thermal->sensors[1], - SENSOR_GPU); - if (error) { - dev_err(&pdev->dev, - "failed to register GPU thermal sensor: %d\n", error); - goto err_unregister_cpu_sensor; + for (i = 0; i < thermal->chip->chn_num; i++) { + error = rockchip_thermal_register_sensor(pdev, thermal, + &thermal->sensors[i], + thermal->chip->chn_id[i]); + if (error) { + dev_err(&pdev->dev, + "failed to register sensor[%d] : error = %d\n", + i, error); + for (j = 0; j < i; j++) + thermal_zone_of_sensor_unregister(&pdev->dev, + thermal->sensors[j].tzd); + goto err_disable_pclk; + } } error = devm_request_threaded_irq(&pdev->dev, irq, NULL, @@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev) if (error) { dev_err(&pdev->dev, "failed to request tsadc irq: %d\n", error); - goto err_unregister_gpu_sensor; + goto err_unregister_sensor; } thermal->chip->control(thermal->regs, true); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); platform_set_drvdata(pdev, thermal); return 0; -err_unregister_gpu_sensor: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); -err_unregister_cpu_sensor: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); +err_unregister_sensor: + while (i--) + thermal_zone_of_sensor_unregister(&pdev->dev, + thermal->sensors[i].tzd); + 
err_disable_pclk: clk_disable_unprepare(thermal->pclk); err_disable_clk: @@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev) struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { + for (i = 0; i < thermal->chip->chn_num; i++) { struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; rockchip_thermal_toggle_sensor(sensor, false); @@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev) struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], false); thermal->chip->control(thermal->regs, false); @@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { - enum sensor_id id = thermal->sensors[i].id; + for (i = 0; i < thermal->chip->chn_num; i++) { + int id = thermal->sensors[i].id; thermal->chip->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - thermal->chip->set_tshut_temp(id, thermal->regs, + thermal->chip->set_tshut_temp(thermal->chip->table, + id, thermal->regs, thermal->tshut_temp); } thermal->chip->control(thermal->regs, true); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); pinctrl_pm_select_default_state(dev); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 7a8a6c6952e9..1c427beffadd 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG config IMX2_WDT tristate "IMX2+ Watchdog" - depends on ARCH_MXC + depends on ARCH_MXC || ARCH_LAYERSCAPE select REGMAP_MMIO select WATCHDOG_CORE help diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index 6ad9df948711..b751f43d76ed 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev) reg = readl(wdt_base + WDT_MODE); reg &= ~WDT_MODE_EN; + reg |= WDT_MODE_KEY; iowrite32(reg, wdt_base + WDT_MODE); return 0; diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index d96bee017fd3..6f17c935a6cf 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog, static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); void __iomem *base = wdev->base; u32 value; diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 4224b3ec83a5..313cd1c6fda0 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT; static DEFINE_SPINLOCK(io_lock); static void __iomem *wdt_base; -struct clk *wdt_clk; +static struct clk *wdt_clk; static int pnx4008_wdt_start(struct watchdog_device *wdd) { @@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdt_clk)) return PTR_ERR(wdt_clk); - ret = clk_enable(wdt_clk); + ret = clk_prepare_enable(wdt_clk); if (ret) return ret; @@ -184,7 +184,7 @@ static int 
pnx4008_wdt_probe(struct platform_device *pdev) return 0; disable_clk: - clk_disable(wdt_clk); + clk_disable_unprepare(wdt_clk); return ret; } @@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev) { watchdog_unregister_device(&pnx4008_wdd); - clk_disable(wdt_clk); + clk_disable_unprepare(wdt_clk); return 0; } diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c index 7f97cdd53f29..9ec57608da82 100644 --- a/drivers/watchdog/tegra_wdt.c +++ b/drivers/watchdog/tegra_wdt.c @@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd, { wdd->timeout = timeout; - if (watchdog_active(wdd)) + if (watchdog_active(wdd)) { + tegra_wdt_stop(wdd); return tegra_wdt_start(wdd); + } return 0; } diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 91bf55a20024..20e2bba10400 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -224,7 +224,7 @@ static int wdt_keepalive(void) static int wdt_set_timeout(int t) { - int tmrval; + unsigned int tmrval; /* * Convert seconds to watchdog counter time units, rounding up. diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 849500e4e14d..524c22146429 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -39,6 +39,7 @@ #include <asm/irq.h> #include <asm/idle.h> #include <asm/io_apic.h> +#include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> @@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ - if (gsi < NR_IRQS_LEGACY) + if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); @@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq) kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ - if (irq < NR_IRQS_LEGACY) + if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 00f40f051d95..38272ad24551 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -49,6 +49,8 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/cpu.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> #include <xen/xen.h> #include <xen/events.h> @@ -58,10 +60,10 @@ struct per_user_data { struct mutex bind_mutex; /* serialize bind/unbind operations */ struct rb_root evtchns; + unsigned int nr_evtchns; /* Notification ring, accessed via /dev/xen/evtchn. 
*/ -#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) -#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) + unsigned int ring_size; evtchn_port_t *ring; unsigned int ring_cons, ring_prod, ring_overflow; struct mutex ring_cons_mutex; /* protect against concurrent readers */ @@ -80,10 +82,41 @@ struct user_evtchn { bool enabled; }; +static evtchn_port_t *evtchn_alloc_ring(unsigned int size) +{ + evtchn_port_t *ring; + size_t s = size * sizeof(*ring); + + ring = kmalloc(s, GFP_KERNEL); + if (!ring) + ring = vmalloc(s); + + return ring; +} + +static void evtchn_free_ring(evtchn_port_t *ring) +{ + kvfree(ring); +} + +static unsigned int evtchn_ring_offset(struct per_user_data *u, + unsigned int idx) +{ + return idx & (u->ring_size - 1); +} + +static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u, + unsigned int idx) +{ + return u->ring + evtchn_ring_offset(u, idx); +} + static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) { struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; + u->nr_evtchns++; + while (*new) { struct user_evtchn *this; @@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) { + u->nr_evtchns--; rb_erase(&evtchn->node, &u->evtchns); kfree(evtchn); } @@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) spin_lock(&u->ring_prod_lock); - if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { - u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; + if ((u->ring_prod - u->ring_cons) < u->ring_size) { + *evtchn_ring_entry(u, u->ring_prod) = evtchn->port; wmb(); /* Ensure ring contents visible */ if (u->ring_cons == u->ring_prod++) { wake_up_interruptible(&u->evtchn_wait); @@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, } /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ - if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { - bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * + if (((c ^ p) & u->ring_size) != 0) { + bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) * sizeof(evtchn_port_t); - bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); + bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t); } else { bytes1 = (p - c) * sizeof(evtchn_port_t); bytes2 = 0; @@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, rc = -EFAULT; rmb(); /* Ensure that we see the port before we copy it. */ - if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || + if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) || ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) goto unlock_out; @@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, return rc; } +static int evtchn_resize_ring(struct per_user_data *u) +{ + unsigned int new_size; + evtchn_port_t *new_ring, *old_ring; + unsigned int p, c; + + /* + * Ensure the ring is large enough to capture all possible + * events. i.e., one free slot for each bound event. + */ + if (u->nr_evtchns <= u->ring_size) + return 0; + + if (u->ring_size == 0) + new_size = 64; + else + new_size = 2 * u->ring_size; + + new_ring = evtchn_alloc_ring(new_size); + if (!new_ring) + return -ENOMEM; + + old_ring = u->ring; + + /* + * Access to the ring contents is serialized by either the + * prod /or/ cons lock so take both when resizing. 
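The evtchn rework above replaces the fixed one-page ring with a growable power-of-two ring addressed by free-running cons/prod counters, so the live region can wrap and has to be read or copied in two chunks split at the end of the array. Below is a minimal standalone sketch of that wrap-aware copy; the ring size, element type and helper names are made up for illustration.

#include <stdio.h>
#include <string.h>

#define RING_SIZE 8u                    /* must be a power of two */
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static unsigned int ring[RING_SIZE];

/* Copy the live region [cons, prod) out of the ring, in at most two chunks. */
static size_t ring_copy_out(unsigned int cons, unsigned int prod,
                            unsigned int *dst)
{
    unsigned int avail = prod - cons;   /* wrap-safe with unsigned math */
    unsigned int c = RING_MASK(cons);
    unsigned int chunk1;
    size_t n = 0;

    if (avail > RING_SIZE)
        avail = RING_SIZE;              /* overrun: keep only what still fits */

    /* First chunk: from cons up to the end of the array (or up to prod). */
    chunk1 = RING_SIZE - c;
    if (chunk1 > avail)
        chunk1 = avail;
    memcpy(dst, &ring[c], chunk1 * sizeof(*ring));
    n += chunk1;

    /* Second chunk: whatever wrapped around to the start of the array. */
    memcpy(dst + n, &ring[0], (avail - chunk1) * sizeof(*ring));
    n += avail - chunk1;

    return n;
}

int main(void)
{
    unsigned int out[RING_SIZE];
    unsigned int prod = 0, cons;
    size_t n, i;

    for (unsigned int v = 1; v <= 10; v++) {    /* producer laps the ring once */
        ring[RING_MASK(prod)] = v;
        prod++;
    }
    cons = prod - RING_SIZE;                    /* oldest entry still held */

    n = ring_copy_out(cons, prod, out);
    for (i = 0; i < n; i++)
        printf("%u ", out[i]);                  /* prints 3 4 5 6 7 8 9 10 */
    printf("\n");
    return 0;
}

The same end-of-array split drives both the bytes1/bytes2 computation in evtchn_read() and the old-to-new copy in evtchn_resize_ring() above.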
+ */ + mutex_lock(&u->ring_cons_mutex); + spin_lock_irq(&u->ring_prod_lock); + + /* + * Copy the old ring contents to the new ring. + * + * If the ring contents crosses the end of the current ring, + * it needs to be copied in two chunks. + * + * +---------+ +------------------+ + * |34567 12| -> | 1234567 | + * +-----p-c-+ +------------------+ + */ + p = evtchn_ring_offset(u, u->ring_prod); + c = evtchn_ring_offset(u, u->ring_cons); + if (p < c) { + memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); + memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); + } else + memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); + + u->ring = new_ring; + u->ring_size = new_size; + + spin_unlock_irq(&u->ring_prod_lock); + mutex_unlock(&u->ring_cons_mutex); + + evtchn_free_ring(old_ring); + + return 0; +} + static int evtchn_bind_to_user(struct per_user_data *u, int port) { struct user_evtchn *evtchn; @@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) if (rc < 0) goto err; + rc = evtchn_resize_ring(u); + if (rc < 0) + goto err; + rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, u->name, evtchn); if (rc < 0) @@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp) init_waitqueue_head(&u->evtchn_wait); - u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL); - if (u->ring == NULL) { - kfree(u->name); - kfree(u); - return -ENOMEM; - } - mutex_init(&u->bind_mutex); mutex_init(&u->ring_cons_mutex); spin_lock_init(&u->ring_prod_lock); @@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp) evtchn_unbind_from_user(u, evtchn); } - free_page((unsigned long)u->ring); + evtchn_free_ring(u->ring); kfree(u->name); kfree(u); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ea0b3b2a91d..1be5dd048622 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) vma->vm_ops = &gntdev_vmops; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; if (use_ptemod) vma->vm_flags |= VM_DONTCOPY; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6dcdb2ec9211..d453d62ab0c6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -355,7 +355,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, index = srcu_read_lock(&fs_info->subvol_srcu); - root = btrfs_read_fs_root_no_name(fs_info, &root_key); + root = btrfs_get_fs_root(fs_info, &root_key, false); if (IS_ERR(root)) { srcu_read_unlock(&fs_info->subvol_srcu, index); ret = PTR_ERR(root); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8c58191249cc..35489e7129a7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3416,6 +3416,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); +void btrfs_get_block_group(struct btrfs_block_group_cache *cache); void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int get_block_group_index(struct btrfs_block_group_cache *cache); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, @@ -3479,6 +3480,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytes_used, u64 type, u64 chunk_objectid, u64 chunk_offset, u64 size); +struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( + struct btrfs_fs_info *fs_info, + const 
u64 chunk_offset); int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 group_start, struct extent_map *em); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index acf3ed11cfb6..4b89680a1923 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -124,7 +124,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } -static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { atomic_inc(&cache->count); } @@ -5915,19 +5915,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, set_extent_dirty(info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); - /* - * No longer have used bytes in this block group, queue - * it for deletion. - */ - if (old_val == 0) { - spin_lock(&info->unused_bgs_lock); - if (list_empty(&cache->bg_list)) { - btrfs_get_block_group(cache); - list_add_tail(&cache->bg_list, - &info->unused_bgs); - } - spin_unlock(&info->unused_bgs_lock); - } } spin_lock(&trans->transaction->dirty_bgs_lock); @@ -5939,6 +5926,22 @@ static int update_block_group(struct btrfs_trans_handle *trans, } spin_unlock(&trans->transaction->dirty_bgs_lock); + /* + * No longer have used bytes in this block group, queue it for + * deletion. We do this after adding the block group to the + * dirty list to avoid races between cleaner kthread and space + * cache writeout. + */ + if (!alloc && old_val == 0) { + spin_lock(&info->unused_bgs_lock); + if (list_empty(&cache->bg_list)) { + btrfs_get_block_group(cache); + list_add_tail(&cache->bg_list, + &info->unused_bgs); + } + spin_unlock(&info->unused_bgs_lock); + } + btrfs_put_block_group(cache); total -= num_bytes; bytenr += num_bytes; @@ -8105,21 +8108,47 @@ reada: } /* - * TODO: Modify related function to add related node/leaf to dirty_extent_root, - * for later qgroup accounting. - * - * Current, this function does nothing. + * These may not be seen by the usual inc/dec ref code so we have to + * add them here. 
*/ +static int record_one_subtree_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes) +{ + struct btrfs_qgroup_extent_record *qrecord; + struct btrfs_delayed_ref_root *delayed_refs; + + qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS); + if (!qrecord) + return -ENOMEM; + + qrecord->bytenr = bytenr; + qrecord->num_bytes = num_bytes; + qrecord->old_roots = NULL; + + delayed_refs = &trans->transaction->delayed_refs; + spin_lock(&delayed_refs->lock); + if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord)) + kfree(qrecord); + spin_unlock(&delayed_refs->lock); + + return 0; +} + static int account_leaf_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *eb) { int nr = btrfs_header_nritems(eb); - int i, extent_type; + int i, extent_type, ret; struct btrfs_key key; struct btrfs_file_extent_item *fi; u64 bytenr, num_bytes; + /* We can be called directly from walk_up_proc() */ + if (!root->fs_info->quota_enabled) + return 0; + for (i = 0; i < nr; i++) { btrfs_item_key_to_cpu(eb, &key, i); @@ -8138,6 +8167,10 @@ static int account_leaf_items(struct btrfs_trans_handle *trans, continue; num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); + + ret = record_one_subtree_extent(trans, root, bytenr, num_bytes); + if (ret) + return ret; } return 0; } @@ -8206,8 +8239,6 @@ static int adjust_slots_upwards(struct btrfs_root *root, /* * root_eb is the subtree root and is locked before this function is called. - * TODO: Modify this function to mark all (including complete shared node) - * to dirty_extent_root to allow it get accounted in qgroup. */ static int account_shared_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -8285,6 +8316,11 @@ walk_down: btrfs_tree_read_lock(eb); btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); path->locks[level] = BTRFS_READ_LOCK_BLOCKING; + + ret = record_one_subtree_extent(trans, root, child_bytenr, + root->nodesize); + if (ret) + goto out; } if (level == 0) { @@ -10256,6 +10292,47 @@ out: return ret; } +struct btrfs_trans_handle * +btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info, + const u64 chunk_offset) +{ + struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree; + struct extent_map *em; + struct map_lookup *map; + unsigned int num_items; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, chunk_offset, 1); + read_unlock(&em_tree->lock); + ASSERT(em && em->start == chunk_offset); + + /* + * We need to reserve 3 + N units from the metadata space info in order + * to remove a block group (done at btrfs_remove_chunk() and at + * btrfs_remove_block_group()), which are used for: + * + * 1 unit for adding the free space inode's orphan (located in the tree + * of tree roots). + * 1 unit for deleting the block group item (located in the extent + * tree). + * 1 unit for deleting the free space item (located in tree of tree + * roots). + * N units for deleting N device extent items corresponding to each + * stripe (located in the device tree). + * + * In order to remove a block group we also need to reserve units in the + * system space info in order to update the chunk tree (update one or + * more device items and remove one chunk item), but this is done at + * btrfs_remove_chunk() through a call to check_system_chunk(). 
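As a quick worked instance of the reservation arithmetic spelled out in the comment above (purely illustrative, with a hypothetical four-stripe chunk):

/* Illustrative only: 1 orphan item + 1 block group item + 1 free space item,
 * plus one device extent item per stripe. */
static unsigned int block_group_removal_units(unsigned int num_stripes)
{
    return 3 + num_stripes;     /* four stripes -> 3 + 4 = 7 metadata units */
}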
+ */ + map = (struct map_lookup *)em->bdev; + num_items = 3 + map->num_stripes; + free_extent_map(em); + + return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, + num_items, 1); +} + /* * Process the unused_bgs list and remove any that don't have any allocated * space inside of them. @@ -10322,8 +10399,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Want to do this before we do anything else so we can recover * properly if we fail to join the transaction. */ - /* 1 for btrfs_orphan_reserve_metadata() */ - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_trans_remove_block_group(fs_info, + block_group->key.objectid); if (IS_ERR(trans)) { btrfs_dec_block_group_ro(root, block_group); ret = PTR_ERR(trans); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 977e715f0bf2..72e73461c064 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1882,8 +1882,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) struct btrfs_log_ctx ctx; int ret = 0; bool full_sync = 0; - const u64 len = end - start + 1; + u64 len; + /* + * The range length can be represented by u64, we have to do the typecasts + * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync() + */ + len = (u64)end - (u64)start + 1; trace_btrfs_sync_file(file, datasync); /* @@ -2071,8 +2076,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) } } if (!full_sync) { - ret = btrfs_wait_ordered_range(inode, start, - end - start + 1); + ret = btrfs_wait_ordered_range(inode, start, len); if (ret) { btrfs_end_transaction(trans, root); goto out; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 994490d5fa64..a70c5790f8f5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4046,9 +4046,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, */ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) { - struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - int ret; /* * 1 for the possible orphan item @@ -4057,27 +4055,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) * 1 for the inode ref * 1 for the inode */ - trans = btrfs_start_transaction(root, 5); - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) - return trans; - - if (PTR_ERR(trans) == -ENOSPC) { - u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); - - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return trans; - ret = btrfs_cond_migrate_bytes(root->fs_info, - &root->fs_info->trans_block_rsv, - num_bytes, 5); - if (ret) { - btrfs_end_transaction(trans, root); - return ERR_PTR(ret); - } - trans->block_rsv = &root->fs_info->trans_block_rsv; - trans->bytes_reserved = num_bytes; - } - return trans; + return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); } static int btrfs_unlink(struct inode *dir, struct dentry *dentry) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 93e12c18ffd7..5279fdae7142 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -993,9 +993,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; - spin_lock(&fs_info->qgroup_lock); fs_info->quota_enabled = 0; fs_info->pending_quota_state = 0; + btrfs_qgroup_wait_for_completion(fs_info); + spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; fs_info->quota_root = NULL; fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; @@ -1461,6 +1462,8 @@ struct 
btrfs_qgroup_extent_record struct btrfs_qgroup_extent_record *entry; u64 bytenr = record->bytenr; + assert_spin_locked(&delayed_refs->lock); + while (*p) { parent_node = *p; entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2907a77fb1f6..b091d94ceef6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3432,7 +3432,9 @@ out: static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, - u64 dev_offset, int is_dev_replace) + u64 dev_offset, + struct btrfs_block_group_cache *cache, + int is_dev_replace) { struct btrfs_mapping_tree *map_tree = &sctx->dev_root->fs_info->mapping_tree; @@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); read_unlock(&map_tree->map_tree.lock); - if (!em) - return -EINVAL; + if (!em) { + /* + * Might have been an unused block group deleted by the cleaner + * kthread or relocation. + */ + spin_lock(&cache->lock); + if (!cache->removed) + ret = -EINVAL; + spin_unlock(&cache->lock); + + return ret; + } map = (struct map_lookup *)em->bdev; if (em->start != chunk_offset) @@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 length; u64 chunk_offset; int ret = 0; + int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; @@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(root, cache); scrub_pause_off(fs_info); - if (ret) { + + if (ret == 0) { + ro_set = 1; + } else if (ret == -ENOSPC) { + /* + * btrfs_inc_block_group_ro return -ENOSPC when it + * failed in creating new chunk for metadata. + * It is not a problem for scrub/replace, because + * metadata are always cowed, and our scrub paused + * commit_transactions. + */ + ro_set = 0; + } else { + btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n", + ret); btrfs_put_block_group(cache); break; } @@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, - found_key.offset, is_dev_replace); + found_key.offset, cache, is_dev_replace); /* * flush, submit all pending read and write bios, afterwards @@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_off(fs_info); - btrfs_dec_block_group_ro(root, cache); + if (ro_set) + btrfs_dec_block_group_ro(root, cache); + + /* + * We might have prevented the cleaner kthread from deleting + * this block group if it was already unused because we raced + * and set it to RO mode first. So add it back to the unused + * list, otherwise it might not ever be deleted unless a manual + * balance is triggered or it becomes used and unused again. 
+ */ + spin_lock(&cache->lock); + if (!cache->removed && !cache->ro && cache->reserved == 0 && + btrfs_block_group_used(&cache->item) == 0) { + spin_unlock(&cache->lock); + spin_lock(&fs_info->unused_bgs_lock); + if (list_empty(&cache->bg_list)) { + btrfs_get_block_group(cache); + list_add_tail(&cache->bg_list, + &fs_info->unused_bgs); + } + spin_unlock(&fs_info->unused_bgs_lock); + } else { + spin_unlock(&cache->lock); + } btrfs_put_block_group(cache); if (ret) diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index c8c3d70c31ff..8b72b005bfb9 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -898,8 +898,10 @@ int btrfs_test_free_space_cache(void) } root = btrfs_alloc_dummy_root(); - if (!root) + if (IS_ERR(root)) { + ret = PTR_ERR(root); goto out; + } root->fs_info = btrfs_alloc_dummy_fs_info(); if (!root->fs_info) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 418c6a2ad7d8..3367a3c6f214 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -592,6 +592,38 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, return start_transaction(root, num_items, TRANS_START, BTRFS_RESERVE_FLUSH_ALL); } +struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( + struct btrfs_root *root, + unsigned int num_items, + int min_factor) +{ + struct btrfs_trans_handle *trans; + u64 num_bytes; + int ret; + + trans = btrfs_start_transaction(root, num_items); + if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) + return trans; + + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return trans; + + num_bytes = btrfs_calc_trans_metadata_size(root, num_items); + ret = btrfs_cond_migrate_bytes(root->fs_info, + &root->fs_info->trans_block_rsv, + num_bytes, + min_factor); + if (ret) { + btrfs_end_transaction(trans, root); + return ERR_PTR(ret); + } + + trans->block_rsv = &root->fs_info->trans_block_rsv; + trans->bytes_reserved = num_bytes; + + return trans; +} struct btrfs_trans_handle *btrfs_start_transaction_lflush( struct btrfs_root *root, diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index b05b2f64d913..0da21ca9b3fb 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -185,6 +185,10 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items); +struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( + struct btrfs_root *root, + unsigned int num_items, + int min_factor); struct btrfs_trans_handle *btrfs_start_transaction_lflush( struct btrfs_root *root, unsigned int num_items); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a6df8fdc1312..456452206609 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1973,8 +1973,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, if (srcdev->writeable) { fs_devices->rw_devices--; /* zero out the old super if it is writable */ - btrfs_scratch_superblocks(srcdev->bdev, - rcu_str_deref(srcdev->name)); + btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); } if (srcdev->bdev) @@ -2024,8 +2023,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); if (tgtdev->bdev) { - btrfs_scratch_superblocks(tgtdev->bdev, - rcu_str_deref(tgtdev->name)); + btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 
fs_info->fs_devices->open_devices--; } fs_info->fs_devices->num_devices--; @@ -2853,7 +2851,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset) if (ret) return ret; - trans = btrfs_start_transaction(root, 0); + trans = btrfs_start_trans_remove_block_group(root->fs_info, + chunk_offset); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_std_error(root->fs_info, ret, NULL); @@ -3123,7 +3122,7 @@ static int chunk_profiles_filter(u64 chunk_type, return 1; } -static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, +static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { struct btrfs_block_group_cache *cache; @@ -3156,7 +3155,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, return ret; } -static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, +static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { struct btrfs_block_group_cache *cache; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index ec5712372732..d5c84f6b1353 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -382,7 +382,7 @@ struct map_lookup { #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) #define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) #define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) -#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 8) +#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10) #define BTRFS_BALANCE_ARGS_MASK \ (BTRFS_BALANCE_ARGS_PROFILES | \ diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 646cdac73488..beac58b0e09c 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -78,7 +78,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) p = xdr_inline_decode(xdr, nbytes); if (unlikely(p == NULL)) - printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n"); + printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed " + "or truncated request.\n"); return p; } @@ -889,6 +890,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r struct cb_compound_hdr_arg hdr_arg = { 0 }; struct cb_compound_hdr_res hdr_res = { NULL }; struct xdr_stream xdr_in, xdr_out; + struct xdr_buf *rq_arg = &rqstp->rq_arg; __be32 *p, status; struct cb_process_state cps = { .drc_status = 0, @@ -900,7 +902,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r dprintk("%s: start\n", __func__); - xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); + rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len; + xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base); p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); xdr_init_encode(&xdr_out, &rqstp->rq_res, p); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 326d9e10d833..31b0a52223a7 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -618,7 +618,10 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); nfs_vmtruncate(inode, attr->ia_size); } - nfs_update_inode(inode, fattr); + if (fattr->valid) + nfs_update_inode(inode, fattr); + else + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; spin_unlock(&inode->i_lock); } EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); @@ -1824,7 +1827,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) nfsi->attr_gencount 
= fattr->gencount; } - invalid &= ~NFS_INO_INVALID_ATTR; + + /* Don't declare attrcache up to date if there were no attrs! */ + if (fattr->valid != 0) + invalid &= ~NFS_INO_INVALID_ATTR; + /* Don't invalidate the data if we were to blame */ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 3e92a3cde15d..6b1ce9825430 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -14,7 +14,7 @@ #include "pnfs.h" #include "internal.h" -#define NFSDBG_FACILITY NFSDBG_PNFS +#define NFSDBG_FACILITY NFSDBG_PROC static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, fmode_t fmode) @@ -284,6 +284,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f, .dst_fh = NFS_FH(dst_inode), .src_offset = src_offset, .dst_offset = dst_offset, + .count = count, .dst_bitmask = server->cache_consistency_bitmask, }; struct nfs42_clone_res res = { diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 223bedda64ae..10410e8b5853 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) return ret; idr_preload(GFP_KERNEL); spin_lock(&nn->nfs_client_lock); - ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT); + ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT); if (ret >= 0) clp->cl_cb_ident = ret; spin_unlock(&nn->nfs_client_lock); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 4aa571956cd6..db9b5fea5b3e 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -7,6 +7,7 @@ #include <linux/file.h> #include <linux/falloc.h> #include <linux/nfs_fs.h> +#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */ #include "delegation.h" #include "internal.h" #include "iostat.h" @@ -203,6 +204,7 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, struct fd src_file; struct inode *src_inode; unsigned int bs = server->clone_blksize; + bool same_inode = false; int ret; /* dst file must be opened for writing */ @@ -221,10 +223,8 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, src_inode = file_inode(src_file.file); - /* src and dst must be different files */ - ret = -EINVAL; if (src_inode == dst_inode) - goto out_fput; + same_inode = true; /* src file must be opened for reading */ if (!(src_file.file->f_mode & FMODE_READ)) @@ -249,8 +249,16 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, goto out_fput; } + /* verify if ranges are overlapped within the same file */ + if (same_inode) { + if (dst_off + count > src_off && dst_off < src_off + count) + goto out_fput; + } + /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? 
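Two small techniques from the nfs42_ioctl_clone() changes above are worth seeing in isolation: the half-open range overlap test used to reject a same-file clone onto itself, and taking the two inode locks in a fixed (address) order so concurrent clones in opposite directions cannot deadlock. The sketch below reproduces both with plain pthread mutexes; the function names and the 4096/8192 sample offsets are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

/* [src_off, src_off + count) and [dst_off, dst_off + count) overlap iff each
 * starts before the other ends -- the same test the same-inode path applies. */
static bool ranges_overlap(uint64_t src_off, uint64_t dst_off, uint64_t count)
{
    return dst_off + count > src_off && dst_off < src_off + count;
}

/* Take two locks in a stable (address) order; a single lock is taken once. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (a == b) {
        pthread_mutex_lock(a);
    } else if ((uintptr_t)a < (uintptr_t)b) {
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    } else {
        pthread_mutex_lock(b);
        pthread_mutex_lock(a);
    }
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    pthread_mutex_unlock(a);
    if (a != b)
        pthread_mutex_unlock(b);
}

int main(void)
{
    pthread_mutex_t src = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t dst = PTHREAD_MUTEX_INITIALIZER;

    printf("overlap(0, 4096, 8192): %d\n", ranges_overlap(0, 4096, 8192)); /* 1 */
    printf("overlap(0, 8192, 8192): %d\n", ranges_overlap(0, 8192, 8192)); /* 0 */

    lock_pair(&src, &dst);
    /* ... clone work would go here ... */
    unlock_pair(&src, &dst);
    return 0;
}

Locking by ascending address is interchangeable with any other total order; the only requirement is that every caller agrees on it.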
*/ - if (dst_inode < src_inode) { + if (same_inode) { + mutex_lock(&src_inode->i_mutex); + } else if (dst_inode < src_inode) { mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); } else { @@ -275,7 +283,9 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); out_unlock: - if (dst_inode < src_inode) { + if (same_inode) { + mutex_unlock(&src_inode->i_mutex); + } else if (dst_inode < src_inode) { mutex_unlock(&src_inode->i_mutex); mutex_unlock(&dst_inode->i_mutex); } else { @@ -291,46 +301,31 @@ out_drop_write: static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) { - struct nfs_ioctl_clone_range_args args; + struct btrfs_ioctl_clone_range_args args; if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; - return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_off, args.dst_off, args.count); -} -#else -static long nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, - u64 src_off, u64 dst_off, u64 count) -{ - return -ENOTTY; -} - -static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) -{ - return -ENOTTY; + return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_offset, + args.dest_offset, args.src_length); } -#endif /* CONFIG_NFS_V4_2 */ long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; switch (cmd) { - case NFS_IOC_CLONE: + case BTRFS_IOC_CLONE: return nfs42_ioctl_clone(file, arg, 0, 0, 0); - case NFS_IOC_CLONE_RANGE: + case BTRFS_IOC_CLONE_RANGE: return nfs42_ioctl_clone_range(file, argp); } return -ENOTTY; } +#endif /* CONFIG_NFS_V4_2 */ const struct file_operations nfs4_file_operations = { -#ifdef CONFIG_NFS_V4_2 - .llseek = nfs4_file_llseek, -#else - .llseek = nfs_file_llseek, -#endif .read_iter = nfs_file_read, .write_iter = nfs_file_write, .mmap = nfs_file_mmap, @@ -342,14 +337,14 @@ const struct file_operations nfs4_file_operations = { .flock = nfs_flock, .splice_read = nfs_file_splice_read, .splice_write = iter_file_splice_write, -#ifdef CONFIG_NFS_V4_2 - .fallocate = nfs42_fallocate, -#endif /* CONFIG_NFS_V4_2 */ .check_flags = nfs_check_flags, .setlease = simple_nosetlease, -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NFS_V4_2 + .llseek = nfs4_file_llseek, + .fallocate = nfs42_fallocate, .unlocked_ioctl = nfs4_ioctl, -#else .compat_ioctl = nfs4_ioctl, -#endif /* CONFIG_COMPAT */ +#else + .llseek = nfs_file_llseek, +#endif }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 765a03559363..89818036f035 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7866,7 +7866,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) spin_unlock(&inode->i_lock); goto out_restart; } - if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) + if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN) goto out_restart; out: dprintk("<-- %s\n", __func__); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index dfed4f5c8fcc..4e4441216804 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3615,6 +3615,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st status = 0; if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) goto out; + bitmap[0] &= ~FATTR4_WORD0_FS_LOCATIONS; status = -EIO; /* Ignore borken servers that return unrequested attrs */ if (unlikely(res == NULL)) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 
93496c059837..5a8ae2125b50 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); - lgp = kzalloc(sizeof(*lgp), gfp_flags); - if (lgp == NULL) - return NULL; + /* + * Synchronously retrieve layout information from server and + * store in lseg. If we race with a concurrent seqid morphing + * op, then re-send the LAYOUTGET. + */ + do { + lgp = kzalloc(sizeof(*lgp), gfp_flags); + if (lgp == NULL) + return NULL; + + i_size = i_size_read(ino); + + lgp->args.minlength = PAGE_CACHE_SIZE; + if (lgp->args.minlength > range->length) + lgp->args.minlength = range->length; + if (range->iomode == IOMODE_READ) { + if (range->offset >= i_size) + lgp->args.minlength = 0; + else if (i_size - range->offset < lgp->args.minlength) + lgp->args.minlength = i_size - range->offset; + } + lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; + lgp->args.range = *range; + lgp->args.type = server->pnfs_curr_ld->id; + lgp->args.inode = ino; + lgp->args.ctx = get_nfs_open_context(ctx); + lgp->gfp_flags = gfp_flags; + lgp->cred = lo->plh_lc_cred; - i_size = i_size_read(ino); + lseg = nfs4_proc_layoutget(lgp, gfp_flags); + } while (lseg == ERR_PTR(-EAGAIN)); - lgp->args.minlength = PAGE_CACHE_SIZE; - if (lgp->args.minlength > range->length) - lgp->args.minlength = range->length; - if (range->iomode == IOMODE_READ) { - if (range->offset >= i_size) - lgp->args.minlength = 0; - else if (i_size - range->offset < lgp->args.minlength) - lgp->args.minlength = i_size - range->offset; - } - lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; - lgp->args.range = *range; - lgp->args.type = server->pnfs_curr_ld->id; - lgp->args.inode = ino; - lgp->args.ctx = get_nfs_open_context(ctx); - lgp->gfp_flags = gfp_flags; - lgp->cred = lo->plh_lc_cred; - - /* Synchronously retrieve layout information from server and - * store in lseg. - */ - lseg = nfs4_proc_layoutget(lgp, gfp_flags); if (IS_ERR(lseg)) { switch (PTR_ERR(lseg)) { case -ENOMEM: @@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) /* existing state ID, make sure the sequence number matches. 
*/ if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { dprintk("%s forget reply due to sequence\n", __func__); + status = -EAGAIN; goto out_forget_reply; } pnfs_set_layout_stateid(lo, &res->stateid, false); diff --git a/fs/splice.c b/fs/splice.c index 801c21cd77fe..4cf700d50b40 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des */ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) { + /* + * Check for signal early to make process killable when there are + * always buffers available + */ + if (signal_pending(current)) + return -ERESTARTSYS; + while (!pipe->nrbufs) { if (!pipe->writers) return 0; @@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_from_pipe_begin(sd); do { + cond_resched(); ret = splice_from_pipe_next(pipe, sd); if (ret > 0) ret = splice_from_pipe_feed(pipe, sd, actor); diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 590ad9206e3f..02fa1dcc5969 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) inode->i_fop = &sysv_dir_operations; inode->i_mapping->a_ops = &sysv_aops; } else if (S_ISLNK(inode->i_mode)) { - if (inode->i_blocks) { - inode->i_op = &sysv_symlink_inode_operations; - inode->i_mapping->a_ops = &sysv_aops; - } else { - inode->i_op = &simple_symlink_inode_operations; - inode->i_link = (char *)SYSV_I(inode)->i_data; - nd_terminate_link(inode->i_link, inode->i_size, - sizeof(SYSV_I(inode)->i_data) - 1); - } + inode->i_op = &sysv_symlink_inode_operations; + inode->i_mapping->a_ops = &sysv_aops; } else init_special_inode(inode, inode->i_mode, rdev); } diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 9c747cb14ad8..d2f41477f8ae 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, struct irq_phys_map *map, bool level); void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); -int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int irq); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 054833939995..1991aea2ec4c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, } static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, const char *cells_name, - size_t index, struct acpi_reference_args *args) + const char *name, size_t index, + struct acpi_reference_args *args) { return -ENXIO; } diff --git a/include/linux/kref.h b/include/linux/kref.h index 484604d184be..e15828fd71f1 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -19,7 +19,6 @@ #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/mutex.h> -#include <linux/spinlock.h> struct kref { atomic_t refcount; @@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) return kref_sub(kref, 1, release); } -/** - * kref_put_spinlock_irqsave 
- decrement refcount for object. - * @kref: object. - * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. - * @lock: lock to take in release case - * - * Behaves identical to kref_put with one exception. If the reference count - * drops to zero, the lock will be taken atomically wrt dropping the reference - * count. The release function has to call spin_unlock() without _irqrestore. - */ -static inline int kref_put_spinlock_irqsave(struct kref *kref, - void (*release)(struct kref *kref), - spinlock_t *lock) -{ - unsigned long flags; - - WARN_ON(release == NULL); - if (atomic_add_unless(&kref->refcount, -1, 1)) - return 0; - spin_lock_irqsave(lock, flags); - if (atomic_dec_and_test(&kref->refcount)) { - release(kref); - local_irq_restore(flags); - return 1; - } - spin_unlock_irqrestore(lock, flags); - return 0; -} - static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), struct mutex *lock) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5706a2108f0a..c923350ca20a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ idx++) +static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) +{ + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) + if (vcpu->vcpu_id == id) + return vcpu; + return NULL; +} + #define kvm_for_each_memslot(memslot, slots) \ for (memslot = &slots->memslots[0]; \ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 69c9057e1ab8..3db5552b17d5 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -58,7 +58,6 @@ enum { struct nvm_id_group { u8 mtype; u8 fmtype; - u16 res16; u8 num_ch; u8 num_lun; u8 num_pln; @@ -74,9 +73,9 @@ struct nvm_id_group { u32 tbet; u32 tbem; u32 mpos; + u32 mccap; u16 cpar; - u8 res[913]; -} __packed; +}; struct nvm_addr_format { u8 ch_offset; @@ -91,19 +90,15 @@ struct nvm_addr_format { u8 pg_len; u8 sect_offset; u8 sect_len; - u8 res[4]; }; struct nvm_id { u8 ver_id; u8 vmnt; u8 cgrps; - u8 res[5]; u32 cap; u32 dom; struct nvm_addr_format ppaf; - u8 ppat; - u8 resv[224]; struct nvm_id_group groups[4]; } __packed; @@ -123,39 +118,28 @@ struct nvm_tgt_instance { #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 -#define NVM_SEC_BITS (8) -#define NVM_PL_BITS (6) -#define NVM_PG_BITS (16) #define NVM_BLK_BITS (16) -#define NVM_LUN_BITS (10) +#define NVM_PG_BITS (16) +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (8) +#define NVM_LUN_BITS (8) #define NVM_CH_BITS (8) struct ppa_addr { + /* Generic structure for all addresses */ union { - /* Channel-based PPA format in nand 4x2x2x2x8x10 */ - struct { - u64 ch : 4; - u64 sec : 2; /* 4 sectors per page */ - u64 pl : 2; /* 4 planes per LUN */ - u64 lun : 2; /* 4 LUNs per channel */ - u64 pg : 8; /* 256 pages per block */ - u64 blk : 10;/* 1024 blocks per plane */ - u64 resved : 36; - } chnl; - - /* Generic structure for all addresses */ struct { + u64 blk : NVM_BLK_BITS; + u64 pg : NVM_PG_BITS; u64 sec : NVM_SEC_BITS; u64 pl : NVM_PL_BITS; - u64 pg : NVM_PG_BITS; - u64 blk : NVM_BLK_BITS; u64 lun : NVM_LUN_BITS; u64 ch : NVM_CH_BITS; } g; u64 ppa; }; -} __packed; +}; struct nvm_rq { struct nvm_tgt_instance 
*ins; @@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) struct nvm_block; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); -typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); +typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, nvm_l2p_update_fn *, void *); -typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, +typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int, nvm_bb_update_fn *, void *); typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); @@ -210,7 +194,7 @@ struct nvm_dev_ops { nvm_id_fn *identity; nvm_get_l2p_tbl_fn *get_l2p_tbl; nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb; + nvm_op_set_bb_fn *set_bb_tbl; nvm_submit_io_fn *submit_io; nvm_erase_blk_fn *erase_block; @@ -220,7 +204,7 @@ struct nvm_dev_ops { nvm_dev_dma_alloc_fn *dev_dma_alloc; nvm_dev_dma_free_fn *dev_dma_free; - uint8_t max_phys_sect; + unsigned int max_phys_sect; }; struct nvm_lun { @@ -229,7 +213,9 @@ struct nvm_lun { int lun_id; int chnl_id; + unsigned int nr_inuse_blocks; /* Number of used blocks */ unsigned int nr_free_blocks; /* Number of unused blocks */ + unsigned int nr_bad_blocks; /* Number of bad blocks */ struct nvm_block *blocks; spinlock_t lock; @@ -263,8 +249,7 @@ struct nvm_dev { int blks_per_lun; int sec_size; int oob_size; - int addr_mode; - struct nvm_addr_format addr_format; + struct nvm_addr_format ppaf; /* Calculated/Cached values. These do not reflect the actual usable * blocks at run-time. @@ -290,118 +275,45 @@ struct nvm_dev { char name[DISK_NAME_LEN]; }; -/* fallback conversion */ -static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct ppa_addr l; - - l.ppa = r.g.sec + - r.g.pg * dev->sec_per_pg + - r.g.blk * (dev->pgs_per_blk * - dev->sec_per_pg) + - r.g.lun * (dev->blks_per_lun * - dev->pgs_per_blk * - dev->sec_per_pg) + - r.g.ch * (dev->blks_per_lun * - dev->pgs_per_blk * - dev->luns_per_chnl * - dev->sec_per_pg); - - return l; -} - -/* fallback conversion */ -static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) +static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, + struct ppa_addr r) { struct ppa_addr l; - int secs, pgs, blks, luns; - sector_t ppa = r.ppa; - l.ppa = 0; - - div_u64_rem(ppa, dev->sec_per_pg, &secs); - l.g.sec = secs; - - sector_div(ppa, dev->sec_per_pg); - div_u64_rem(ppa, dev->sec_per_blk, &pgs); - l.g.pg = pgs; - - sector_div(ppa, dev->pgs_per_blk); - div_u64_rem(ppa, dev->blks_per_lun, &blks); - l.g.blk = blks; - - sector_div(ppa, dev->blks_per_lun); - div_u64_rem(ppa, dev->luns_per_chnl, &luns); - l.g.lun = luns; - - sector_div(ppa, dev->luns_per_chnl); - l.g.ch = ppa; + l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; + l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; + l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; + l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; + l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; + l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; return l; } -static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) +static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) { struct ppa_addr l; - l.ppa = 0; - - l.chnl.sec = r.g.sec; - l.chnl.pl = r.g.pl; 
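The lightnvm header changes above drop the hard-coded linear/channel address modes in favour of packing and unpacking the generic address through per-device field offsets and lengths (the ppaf format). Below is a standalone sketch of that shift-and-mask scheme; the struct names and the particular offsets and lengths are invented for illustration and are not any real controller's geometry.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical address format, as a device might report it. */
struct addr_format {
    uint8_t ch_offset, ch_len;
    uint8_t lun_offset, lun_len;
    uint8_t blk_offset, blk_len;
    uint8_t pg_offset, pg_len;
};

struct gen_addr {
    uint64_t ch, lun, blk, pg;
};

static uint64_t pack(const struct addr_format *f, const struct gen_addr *a)
{
    return (a->pg  << f->pg_offset)  |
           (a->blk << f->blk_offset) |
           (a->lun << f->lun_offset) |
           (a->ch  << f->ch_offset);
}

static struct gen_addr unpack(const struct addr_format *f, uint64_t v)
{
    struct gen_addr a;

    a.pg  = (v >> f->pg_offset)  & ((1ull << f->pg_len)  - 1);
    a.blk = (v >> f->blk_offset) & ((1ull << f->blk_len) - 1);
    a.lun = (v >> f->lun_offset) & ((1ull << f->lun_len) - 1);
    a.ch  = (v >> f->ch_offset)  & ((1ull << f->ch_len)  - 1);
    return a;
}

int main(void)
{
    struct addr_format f = {
        .pg_offset  = 0,  .pg_len  = 8,
        .blk_offset = 8,  .blk_len = 12,
        .lun_offset = 20, .lun_len = 6,
        .ch_offset  = 26, .ch_len  = 4,
    };
    struct gen_addr a = { .ch = 3, .lun = 17, .blk = 1000, .pg = 42 };
    uint64_t dev = pack(&f, &a);
    struct gen_addr back = unpack(&f, dev);

    printf("packed=0x%llx ch=%llu lun=%llu blk=%llu pg=%llu\n",
           (unsigned long long)dev,
           (unsigned long long)back.ch, (unsigned long long)back.lun,
           (unsigned long long)back.blk, (unsigned long long)back.pg);
    return 0;
}

Driving the shifts from a device-reported format means the same code handles any geometry the device advertises, which is what the generic_to_dev_addr()/dev_to_generic_addr() pair above does with dev->ppaf.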
- l.chnl.pg = r.g.pg; - l.chnl.blk = r.g.blk; - l.chnl.lun = r.g.lun; - l.chnl.ch = r.g.ch; - - return l; -} - -static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) -{ - struct ppa_addr l; - - l.ppa = 0; - - l.g.sec = r.chnl.sec; - l.g.pl = r.chnl.pl; - l.g.pg = r.chnl.pg; - l.g.blk = r.chnl.blk; - l.g.lun = r.chnl.lun; - l.g.ch = r.chnl.ch; + /* + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. + */ + l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & + (((1 << dev->ppaf.blk_len) - 1)); + l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & + (((1 << dev->ppaf.pg_len) - 1)); + l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & + (((1 << dev->ppaf.sect_len) - 1)); + l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & + (((1 << dev->ppaf.pln_len) - 1)); + l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & + (((1 << dev->ppaf.lun_len) - 1)); + l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & + (((1 << dev->ppaf.ch_len) - 1)); return l; } -static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, - struct ppa_addr gppa) -{ - switch (dev->addr_mode) { - case NVM_ADDRMODE_LINEAR: - return __linear_to_generic_addr(dev, gppa); - case NVM_ADDRMODE_CHANNEL: - return __chnl_to_generic_addr(gppa); - default: - BUG(); - } - return gppa; -} - -static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, - struct ppa_addr gppa) -{ - switch (dev->addr_mode) { - case NVM_ADDRMODE_LINEAR: - return __generic_to_linear_addr(dev, gppa); - case NVM_ADDRMODE_CHANNEL: - return __generic_to_chnl_addr(gppa); - default: - BUG(); - } - return gppa; -} - static inline int ppa_empty(struct ppa_addr ppa_addr) { return (ppa_addr.ppa == ADDR_EMPTY); @@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, unsigned long); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); -typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); +typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); struct nvmm_type { const char *name; @@ -492,7 +404,7 @@ struct nvmm_type { nvmm_get_lun_fn *get_lun; /* Statistics */ - nvmm_free_blocks_print_fn *free_blocks_print; + nvmm_lun_info_print_fn *lun_info_print; struct list_head list; }; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 570d630f98ae..11bbae44f4cb 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -251,6 +251,7 @@ struct nfs4_layoutget { struct nfs4_layoutget_res res; struct rpc_cred *cred; gfp_t gfp_flags; + long timeout; }; struct nfs4_getdeviceinfo_args { diff --git a/include/linux/pci.h b/include/linux/pci.h index e828e7b4afec..6ae25aae88fd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -412,9 +412,18 @@ struct pci_host_bridge { void (*release_fn)(struct pci_host_bridge *); void *release_data; unsigned int ignore_reset_delay:1; /* for entire hierarchy */ + /* Resource alignment requirements */ + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) + +struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); + void pci_set_host_bridge_release(struct pci_host_bridge *bridge, void (*release_fn)(struct pci_host_bridge *), void *release_data); diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index 80af3cd35ae4..72ce932c69b2 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ 
-71,7 +71,7 @@ struct scpi_ops { int (*sensor_get_value)(u16, u32 *); }; -#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) +#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) struct scpi_ops *get_scpi_ops(void); #else static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a156b82dd14c..c2b66a277e98 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename, asmlinkage long sys_lchown(const char __user *filename, uid_t user, gid_t group); asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); -#ifdef CONFIG_UID16 +#ifdef CONFIG_HAVE_UID16 asmlinkage long sys_chown16(const char __user *filename, old_uid_t user, old_gid_t group); asmlinkage long sys_lchown16(const char __user *filename, diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 4014a59828fc..613c29bd6baf 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister( static inline int thermal_zone_bind_cooling_device( struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower) + unsigned long upper, unsigned long lower, + unsigned int weight) { return -ENODEV; } static inline int thermal_zone_unbind_cooling_device( struct thermal_zone_device *tz, int trip, diff --git a/include/linux/types.h b/include/linux/types.h index 70d8500bddf1..70dd3dfde631 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t; typedef unsigned long uintptr_t; -#ifdef CONFIG_UID16 +#ifdef CONFIG_HAVE_UID16 /* This is defined by include/asm-{arch}/posix_types.h */ typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0a2c74008e53..aabf0aca0171 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -474,7 +474,7 @@ struct se_cmd { struct completion cmd_wait_comp; const struct target_core_fabric_ops *se_tfo; sense_reason_t (*execute_cmd)(struct se_cmd *); - sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); void *protocol_data; unsigned char *t_task_cdb; diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index 654bae3f1a38..5e6296160361 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h @@ -33,17 +33,6 @@ #define NFS_PIPE_DIRNAME "nfs" -/* NFS ioctls */ -/* Let's follow btrfs lead on CLONE to avoid messing userspace */ -#define NFS_IOC_CLONE _IOW(0x94, 9, int) -#define NFS_IOC_CLONE_RANGE _IOW(0x94, 13, int) - -struct nfs_ioctl_clone_range_args { - __s64 src_fd; - __u64 src_off, count; - __u64 dst_off; -}; - /* * NFS stats. 
The good thing with these values is that NFSv3 errors are * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which diff --git a/kernel/pid.c b/kernel/pid.c index ca368793808e..78b3d9f80d44 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type) rcu_read_lock(); if (type != PIDTYPE_PID) task = task->group_leader; - pid = get_pid(task->pids[type].pid); + pid = get_pid(rcu_dereference(task->pids[type].pid)); rcu_read_unlock(); return pid; } @@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, if (likely(pid_alive(task))) { if (type != PIDTYPE_PID) task = task->group_leader; - nr = pid_nr_ns(task->pids[type].pid, ns); + nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); } rcu_read_unlock(); diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 229956bf8457..95f82d8d4888 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -353,12 +353,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) { struct rpc_xprt *xprt = req->rq_xprt; struct svc_serv *bc_serv = xprt->bc_serv; + struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf; spin_lock(&xprt->bc_pa_lock); list_del(&req->rq_bc_pa_list); xprt_dec_alloc_count(xprt, 1); spin_unlock(&xprt->bc_pa_lock); + if (copied <= rq_rcv_buf->head[0].iov_len) { + rq_rcv_buf->head[0].iov_len = copied; + rq_rcv_buf->page_len = 0; + } else { + rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len; + } + req->rq_private_buf.len = copied; set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index bc5b7b5032ca..7fccf9675df8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); + rqstp->rq_arg.len = req->rq_private_buf.len; /* reset result send buffer "put" position */ resv->iov_len = 0; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 927db9f35ad6..696ccfa08d10 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) size_t datalen = prep->datalen; int ret = 0; + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + return -ENOKEY; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 903dace648a1..16dec53184b6 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1007,13 +1007,16 @@ static void trusted_rcu_free(struct rcu_head *rcu) */ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { - struct trusted_key_payload *p = key->payload.data[0]; + struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + return -ENOKEY; + p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 28cb30f80256..8705d79b2c6f 100644 --- a/security/keys/user_defined.c 
+++ b/security/keys/user_defined.c @@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep) if (ret == 0) { /* attach the new data, displacing the old */ - zap = key->payload.data[0]; + if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + zap = key->payload.data[0]; + else + zap = NULL; rcu_assign_keypointer(key, upayload); key->expiry = 0; } diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 18643bf9894d..456e1a9bcfde 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -638,7 +638,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, { struct avtab_node *node; - if (!ctab || !key || !avd || !xperms) + if (!ctab || !key || !avd) return; for (node = avtab_search_node(ctab, key); node; @@ -657,7 +657,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) == (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) avd->auditallow |= node->datum.u.data; - if ((node->key.specified & AVTAB_ENABLED) && + if (xperms && (node->key.specified & AVTAB_ENABLED) && (node->key.specified & AVTAB_XPERMS)) services_compute_xperms_drivers(xperms, node); } diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 5d99436dfcae..0cda05c72f50 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c @@ -12,9 +12,11 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL v2"); #define OUI_WEISS 0x001c6a +#define OUI_LOUD 0x000ff2 #define DICE_CATEGORY_ID 0x04 #define WEISS_CATEGORY_ID 0x00 +#define LOUD_CATEGORY_ID 0x10 static int dice_interface_check(struct fw_unit *unit) { @@ -57,6 +59,8 @@ static int dice_interface_check(struct fw_unit *unit) } if (vendor == OUI_WEISS) category = WEISS_CATEGORY_ID; + else if (vendor == OUI_LOUD) + category = LOUD_CATEGORY_ID; else category = DICE_CATEGORY_ID; if (device->config_rom[3] != ((vendor << 8) | category) || diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8a7fbdcb4072..963f82430938 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -312,6 +312,10 @@ enum { (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\ AZX_DCAPS_I915_POWERWELL) +#define AZX_DCAPS_INTEL_BROXTON \ + (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\ + AZX_DCAPS_I915_POWERWELL) + /* quirks for ATI SB / AMD Hudson */ #define AZX_DCAPS_PRESET_ATI_SB \ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\ @@ -2124,6 +2128,9 @@ static const struct pci_device_id azx_ids[] = { /* Sunrise Point-LP */ { PCI_DEVICE(0x8086, 0x9d70), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Broxton-P(Apollolake) */ + { PCI_DEVICE(0x8086, 0x5a98), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0a0c), .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 60cd9e700909..bdb6f226d006 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2378,7 +2378,8 @@ static int patch_generic_hdmi(struct hda_codec *codec) * can cover the codec power request, and so need not set this flag. * For previous platforms, there is no such power well feature. 
*/ - if (is_valleyview_plus(codec) || is_skylake(codec)) + if (is_valleyview_plus(codec) || is_skylake(codec) || + is_broxton(codec)) codec->core.link_power_control = 1; if (is_haswell_plus(codec) || is_valleyview_plus(codec)) { diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2f7b065f9ac4..9bedf7c85e29 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1759,6 +1759,7 @@ enum { ALC882_FIXUP_NO_PRIMARY_HP, ALC887_FIXUP_ASUS_BASS, ALC887_FIXUP_BASS_CHMAP, + ALC882_FIXUP_DISABLE_AAMIX, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -1920,6 +1921,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec, static void alc_fixup_bass_chmap(struct hda_codec *codec, const struct hda_fixup *fix, int action); +static void alc_fixup_disable_aamix(struct hda_codec *codec, + const struct hda_fixup *fix, int action); static const struct hda_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { @@ -2151,6 +2154,10 @@ static const struct hda_fixup alc882_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_bass_chmap, }, + [ALC882_FIXUP_DISABLE_AAMIX] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_disable_aamix, + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -2218,6 +2225,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), + SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), @@ -4587,6 +4595,7 @@ enum { ALC292_FIXUP_DISABLE_AAMIX, ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC275_FIXUP_DELL_XPS, + ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, }; static const struct hda_fixup alc269_fixups[] = { @@ -5167,6 +5176,17 @@ static const struct hda_fixup alc269_fixups[] = { {} } }, + [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Disable pass-through path for FRONT 14h */ + {0x20, AC_VERB_SET_COEF_INDEX, 0x36}, + {0x20, AC_VERB_SET_PROC_COEF, 0x1737}, + {} + }, + .chained = true, + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -5180,8 +5200,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), + SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), @@ -5204,6 +5226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x06de, "Dell", 
ALC292_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 826122d8acee..2c7c5eb8b1e9 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -3110,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec, spec->gpio_led = 0x08; } +static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin) +{ + unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin); + + /* count line-out, too, as BIOS sets often so */ + return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE && + (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT || + get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT); +} + +static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin) +{ + unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin); + + /* It was changed in the BIOS to just satisfy MS DTM. + * Lets turn it back into slaved HP + */ + pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) | + (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT); + pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) | + 0x1f; + snd_hda_codec_set_pincfg(codec, pin, pin_cfg); +} static void stac92hd71bxx_fixup_hp(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -3119,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec, if (action != HDA_FIXUP_ACT_PRE_PROBE) return; - if (hp_blike_system(codec->core.subsystem_id)) { - unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f); - if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT || - get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER || - get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) { - /* It was changed in the BIOS to just satisfy MS DTM. - * Lets turn it back into slaved HP - */ - pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) - | (AC_JACK_HP_OUT << - AC_DEFCFG_DEVICE_SHIFT); - pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC - | AC_DEFCFG_SEQUENCE))) - | 0x1f; - snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg); - } + /* when both output A and F are assigned, these are supposedly + * dock and built-in headphones; fix both pin configs + */ + if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) { + fixup_hp_headphone(codec, 0x0a); + fixup_hp_headphone(codec, 0x0f); } if (find_mute_led_cfg(codec, 1)) diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 7661616f3636..5b4c58c3e2c5 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint { u8 running_status_length; } ports[0x10]; u8 seen_f5; + bool in_sysex; + u8 last_cin; u8 error_resubmit; int current_port; }; @@ -468,6 +470,39 @@ static void snd_usbmidi_maudio_broken_running_status_input( } /* + * QinHeng CH345 is buggy: every second packet inside a SysEx has not CIN 4 + * but the previously seen CIN, but still with three data bytes. 
+ */ +static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep, + uint8_t *buffer, int buffer_length) +{ + unsigned int i, cin, length; + + for (i = 0; i + 3 < buffer_length; i += 4) { + if (buffer[i] == 0 && i > 0) + break; + cin = buffer[i] & 0x0f; + if (ep->in_sysex && + cin == ep->last_cin && + (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0) + cin = 0x4; +#if 0 + if (buffer[i + 1] == 0x90) { + /* + * Either a corrupted running status or a real note-on + * message; impossible to detect reliably. + */ + } +#endif + length = snd_usbmidi_cin_length[cin]; + snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length); + ep->in_sysex = cin == 0x4; + if (!ep->in_sysex) + ep->last_cin = cin; + } +} + +/* * CME protocol: like the standard protocol, but SysEx commands are sent as a * single USB packet preceded by a 0x0F byte. */ @@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = { .output_packet = snd_usbmidi_output_standard_packet, }; +static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = { + .input = ch345_broken_sysex_input, + .output = snd_usbmidi_standard_output, + .output_packet = snd_usbmidi_output_standard_packet, +}; + /* * AKAI MPD16 protocol: * @@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi, * Various chips declare a packet size larger than 4 bytes, but * do not actually work with larger packets: */ + case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */ case USB_ID(0x0a92, 0x1020): /* ESI M4U */ case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */ case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */ @@ -2378,6 +2420,10 @@ int snd_usbmidi_create(struct snd_card *card, err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); break; + case QUIRK_MIDI_CH345: + umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops; + err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); + break; default: dev_err(&umidi->dev->dev, "invalid quirk type %d\n", quirk->type); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 1a1e2e4df35e..c60a776e815d 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2829,6 +2829,17 @@ YAMAHA_DEVICE(0x7010, "UB99"), .idProduct = 0x1020, }, +/* QinHeng devices */ +{ + USB_DEVICE(0x1a86, 0x752d), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "QinHeng", + .product_name = "CH345", + .ifnum = 1, + .type = QUIRK_MIDI_CH345 + } +}, + /* KeithMcMillen Stringport */ { USB_DEVICE(0x1f38, 0x0001), diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5ca80e7d30cd..7016ad898187 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -538,6 +538,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, [QUIRK_MIDI_CME] = create_any_midi_quirk, [QUIRK_MIDI_AKAI] = create_any_midi_quirk, [QUIRK_MIDI_FTDI] = create_any_midi_quirk, + [QUIRK_MIDI_CH345] = create_any_midi_quirk, [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 15a12715bd05..b665d85555cb 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -95,6 +95,7 @@ enum quirk_type { QUIRK_MIDI_AKAI, QUIRK_MIDI_US122L, QUIRK_MIDI_FTDI, + QUIRK_MIDI_CH345, QUIRK_AUDIO_STANDARD_INTERFACE, QUIRK_AUDIO_FIXED_ENDPOINT, QUIRK_AUDIO_EDIROL_UAXX, diff --git a/tools/testing/selftests/futex/README b/tools/testing/selftests/futex/README index 
3224a049b196..0558bb9ce0a6 100644 --- a/tools/testing/selftests/futex/README +++ b/tools/testing/selftests/futex/README @@ -27,7 +27,7 @@ o The build system shall remain as simple as possible, avoiding any archive or o Where possible, any helper functions or other package-wide code shall be implemented in header files, avoiding the need to compile intermediate object files. -o External dependendencies shall remain as minimal as possible. Currently gcc +o External dependencies shall remain as minimal as possible. Currently gcc and glibc are the only dependencies. o Tests return 0 for success and < 0 for failure. diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index e38cc54942db..882fe83a3554 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) pid_t parent = getppid(); int fd; void *map1, *map2; + int page_size = sysconf(_SC_PAGESIZE); + + ASSERT_LT(0, page_size); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); @@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) EXPECT_EQ(parent, syscall(__NR_getppid)); map1 = (void *)syscall(sysno, - NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE); + NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); EXPECT_NE(MAP_FAILED, map1); /* mmap2() should never return. */ map2 = (void *)syscall(sysno, - NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); + NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); EXPECT_EQ(MAP_FAILED, map2); /* The test failed, so clean up the resources. */ - munmap(map1, PAGE_SIZE); - munmap(map2, PAGE_SIZE); + munmap(map1, page_size); + munmap(map2, page_size); close(fd); } diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 21a0ab2d8919..69bca185c471 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) kvm_timer_update_state(vcpu); /* - * If we enter the guest with the virtual input level to the VGIC - * asserted, then we have already told the VGIC what we need to, and - * we don't need to exit from the guest until the guest deactivates - * the already injected interrupt, so therefore we should set the - * hardware active state to prevent unnecessary exits from the guest. - * - * Conversely, if the virtual input level is deasserted, then always - * clear the hardware active state to ensure that hardware interrupts - * from the timer triggers a guest exit. - */ - if (timer->irq.level) + * If we enter the guest with the virtual input level to the VGIC + * asserted, then we have already told the VGIC what we need to, and + * we don't need to exit from the guest until the guest deactivates + * the already injected interrupt, so therefore we should set the + * hardware active state to prevent unnecessary exits from the guest. + * + * Also, if we enter the guest with the virtual timer interrupt active, + * then it must be active on the physical distributor, because we set + * the HW bit and the guest must be able to deactivate the virtual and + * physical interrupt at the same time. + * + * Conversely, if the virtual input level is deasserted and the virtual + * interrupt is not active, then always clear the hardware active state + * to ensure that hardware interrupts from the timer triggers a guest + * exit. 
+ */ + if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map)) phys_active = true; else phys_active = false; diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 533538385d5d..65461f821a75 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu) vgic_set_lr(vcpu, lr_nr, vlr); } +static bool dist_active_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); +} + +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) +{ + int i; + + for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { + struct vgic_lr vlr = vgic_get_lr(vcpu, i); + + if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE) + return true; + } + + return dist_active_irq(vcpu); +} + /* * An interrupt may have been disabled after being made pending on the * CPU interface (the classic case is a timer running while we're @@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) * may have been serviced from another vcpu. In all cases, * move along. */ - if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) + if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu)) goto epilog; /* SGIs */ @@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; - struct irq_phys_map *map; - bool phys_active; bool level_pending; - int ret; if (!(vlr.state & LR_HW)) return false; - map = vgic_irq_map_search(vcpu, vlr.irq); - BUG_ON(!map); - - ret = irq_get_irqchip_state(map->irq, - IRQCHIP_STATE_ACTIVE, - &phys_active); - - WARN_ON(ret); - - if (phys_active) - return 0; + if (vlr.state & LR_STATE_ACTIVE) + return false; spin_lock(&dist->lock); level_pending = process_queued_irq(vcpu, lr, vlr); @@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); } -int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) -{ - struct vgic_dist *dist = &vcpu->kvm->arch.vgic; - - if (!irqchip_in_kernel(vcpu->kvm)) - return 0; - - return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); -} - - void vgic_kick_vcpus(struct kvm *kvm) { struct kvm_vcpu *vcpu; |