author      David S. Miller <davem@davemloft.net>    2015-07-14 03:28:09 +0300
committer   David S. Miller <davem@davemloft.net>    2015-07-14 03:28:09 +0300
commit      638d3c63811e31b2745f7fdd568b38c8abcffe03 (patch)
tree        606426ab4de84e59c5f50e1e3cce6e24819d45af /arch
parent      74fe61f17e999a458d5f64ca2aa9a0282ca32198 (diff)
parent      f760b87f8f12eb262f14603e65042996fe03720e (diff)
download    linux-638d3c63811e31b2745f7fdd568b38c8abcffe03.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
net/bridge/br_mdb.c
Minor conflict in br_mdb.c: in 'net' we added a memset of the
on-stack 'ip' variable, whereas in 'net-next' we assign a new
member 'vid'.
Signed-off-by: David S. Miller <davem@davemloft.net>
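
For illustration, this is the shape such a resolution typically takes, keeping both sides of the conflict. Only the memset (from 'net') and the assignment of the new 'vid' member (from 'net-next') are described in the commit message above; the struct name and the surrounding initialisation shown here are assumptions for context, not the exact resolved hunk in br_mdb.c.

	/* sketch of the combined result, not the verbatim resolution */
	struct br_ip ip;

	memset(&ip, 0, sizeof(ip));      /* side from 'net': zero the on-stack variable */
	ip.vid = entry->vid;             /* side from 'net-next': fill in the new member */
	ip.proto = entry->addr.proto;    /* assumed surrounding initialisation */
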
Diffstat (limited to 'arch')
131 files changed, 2429 insertions, 715 deletions
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index e0cf99893212..807f7d61d7a7 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -71,15 +71,12 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) mmput(mm); if (exe_file) { - path = exe_file->f_path; - path_get(&exe_file->f_path); + path_nm = file_path(exe_file, buf, 255); fput(exe_file); - path_nm = d_path(&path, buf, 255); - path_put(&path); } done: - pr_info("Path: %s\n", path_nm); + pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); } static void show_faulting_vma(unsigned long address, char *buf) @@ -103,8 +100,7 @@ static void show_faulting_vma(unsigned long address, char *buf) if (vma && (vma->vm_start <= address)) { struct file *file = vma->vm_file; if (file) { - struct path *path = &file->f_path; - nm = d_path(path, buf, PAGE_SIZE - 1); + nm = file_path(file, buf, PAGE_SIZE - 1); inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a750c1425c3a..1c5021002fe4 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1693,6 +1693,12 @@ config HIGHMEM config HIGHPTE bool "Allocate 2nd-level pagetables from highmem" depends on HIGHMEM + help + The VM uses one page of physical memory for each page table. + For systems with a lot of processes, this can use a lot of + precious low memory, eventually leading to low memory being + consumed by page tables. Setting this option will allow + user-space 2nd level page tables to reside in high memory. config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index f1b157971366..a2e16f940394 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR config DEBUG_SET_MODULE_RONX bool "Set loadable kernel module data as NX and text as RO" - depends on MODULES + depends on MODULES && MMU ---help--- This option helps catch unintended modifications to loadable kernel module's text and read-only data. 
It also prevents execution diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts index 901739fcb85a..5c42d259fa68 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -80,3 +80,7 @@ status = "okay"; }; }; + +&rtc { + system-power-controller; +}; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index c80a3e233792..ade28c790f4b 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -132,6 +132,12 @@ }; }; + emif: emif@4c000000 { + compatible = "ti,emif-am4372"; + reg = <0x4c000000 0x1000000>; + ti,hwmods = "emif"; + }; + edma: edma@49000000 { compatible = "ti,edma3"; ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; @@ -941,6 +947,7 @@ ti,hwmods = "dss_rfbi"; clocks = <&disp_clk>; clock-names = "fck"; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index a42cc377a862..a63bf78191ea 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -605,6 +605,10 @@ phy-supply = <&ldousb_reg>; }; +&usb2_phy2 { + phy-supply = <&ldousb_reg>; +}; + &usb1 { dr_mode = "host"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi index 0854d4493da7..3de9b761cc1a 100644 --- a/arch/arm/boot/dts/armada-xp.dtsi +++ b/arch/arm/boot/dts/armada-xp.dtsi @@ -305,7 +305,7 @@ spi0_pins: spi0-pins { marvell,pins = "mpp36", "mpp37", "mpp38", "mpp39"; - marvell,function = "spi"; + marvell,function = "spi0"; }; uart2_pins: uart2-pins { diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index d260ba779ae5..18177f5a7464 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -1148,7 +1148,7 @@ usb2: gadget@fff78000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,at91sam9g45-udc"; reg = <0x00600000 0x80000 0xfff78000 0x400>; interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 7521bdf17ef2..b6c8df8d380e 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -1108,7 +1108,7 @@ usb2: gadget@f803c000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,at91sam9g45-udc"; reg = <0x00500000 0x80000 0xf803c000 0x400>; interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi index 5dfd3a44bf82..3e21311f9514 100644 --- a/arch/arm/boot/dts/atlas7.dtsi +++ b/arch/arm/boot/dts/atlas7.dtsi @@ -135,6 +135,1025 @@ compatible = "sirf,atlas7-ioc"; reg = <0x18880000 0x1000>, <0x10E40000 0x1000>; + + audio_ac97_pmx: audio_ac97@0 { + audio_ac97 { + groups = "audio_ac97_grp"; + function = "audio_ac97"; + }; + }; + + audio_func_dbg_pmx: audio_func_dbg@0 { + audio_func_dbg { + groups = "audio_func_dbg_grp"; + function = "audio_func_dbg"; + }; + }; + + audio_i2s_pmx: audio_i2s@0 { + audio_i2s { + groups = "audio_i2s_grp"; + function = "audio_i2s"; + }; + }; + + audio_i2s_2ch_pmx: audio_i2s_2ch@0 { + audio_i2s_2ch { + groups = "audio_i2s_2ch_grp"; + function = "audio_i2s_2ch"; + }; + }; + + audio_i2s_extclk_pmx: audio_i2s_extclk@0 { + audio_i2s_extclk { + groups = "audio_i2s_extclk_grp"; + function = "audio_i2s_extclk"; + }; + }; + + audio_uart0_pmx: audio_uart0@0 { + audio_uart0 { + groups = 
"audio_uart0_grp"; + function = "audio_uart0"; + }; + }; + + audio_uart1_pmx: audio_uart1@0 { + audio_uart1 { + groups = "audio_uart1_grp"; + function = "audio_uart1"; + }; + }; + + audio_uart2_pmx0: audio_uart2@0 { + audio_uart2_0 { + groups = "audio_uart2_grp0"; + function = "audio_uart2_m0"; + }; + }; + + audio_uart2_pmx1: audio_uart2@1 { + audio_uart2_1 { + groups = "audio_uart2_grp1"; + function = "audio_uart2_m1"; + }; + }; + + c_can_trnsvr_pmx: c_can_trnsvr@0 { + c_can_trnsvr { + groups = "c_can_trnsvr_grp"; + function = "c_can_trnsvr"; + }; + }; + + c0_can_pmx0: c0_can@0 { + c0_can_0 { + groups = "c0_can_grp0"; + function = "c0_can_m0"; + }; + }; + + c0_can_pmx1: c0_can@1 { + c0_can_1 { + groups = "c0_can_grp1"; + function = "c0_can_m1"; + }; + }; + + c1_can_pmx0: c1_can@0 { + c1_can_0 { + groups = "c1_can_grp0"; + function = "c1_can_m0"; + }; + }; + + c1_can_pmx1: c1_can@1 { + c1_can_1 { + groups = "c1_can_grp1"; + function = "c1_can_m1"; + }; + }; + + c1_can_pmx2: c1_can@2 { + c1_can_2 { + groups = "c1_can_grp2"; + function = "c1_can_m2"; + }; + }; + + ca_audio_lpc_pmx: ca_audio_lpc@0 { + ca_audio_lpc { + groups = "ca_audio_lpc_grp"; + function = "ca_audio_lpc"; + }; + }; + + ca_bt_lpc_pmx: ca_bt_lpc@0 { + ca_bt_lpc { + groups = "ca_bt_lpc_grp"; + function = "ca_bt_lpc"; + }; + }; + + ca_coex_pmx: ca_coex@0 { + ca_coex { + groups = "ca_coex_grp"; + function = "ca_coex"; + }; + }; + + ca_curator_lpc_pmx: ca_curator_lpc@0 { + ca_curator_lpc { + groups = "ca_curator_lpc_grp"; + function = "ca_curator_lpc"; + }; + }; + + ca_pcm_debug_pmx: ca_pcm_debug@0 { + ca_pcm_debug { + groups = "ca_pcm_debug_grp"; + function = "ca_pcm_debug"; + }; + }; + + ca_pio_pmx: ca_pio@0 { + ca_pio { + groups = "ca_pio_grp"; + function = "ca_pio"; + }; + }; + + ca_sdio_debug_pmx: ca_sdio_debug@0 { + ca_sdio_debug { + groups = "ca_sdio_debug_grp"; + function = "ca_sdio_debug"; + }; + }; + + ca_spi_pmx: ca_spi@0 { + ca_spi { + groups = "ca_spi_grp"; + function = "ca_spi"; + }; + }; + + ca_trb_pmx: ca_trb@0 { + ca_trb { + groups = "ca_trb_grp"; + function = "ca_trb"; + }; + }; + + ca_uart_debug_pmx: ca_uart_debug@0 { + ca_uart_debug { + groups = "ca_uart_debug_grp"; + function = "ca_uart_debug"; + }; + }; + + clkc_pmx0: clkc@0 { + clkc_0 { + groups = "clkc_grp0"; + function = "clkc_m0"; + }; + }; + + clkc_pmx1: clkc@1 { + clkc_1 { + groups = "clkc_grp1"; + function = "clkc_m1"; + }; + }; + + gn_gnss_i2c_pmx: gn_gnss_i2c@0 { + gn_gnss_i2c { + groups = "gn_gnss_i2c_grp"; + function = "gn_gnss_i2c"; + }; + }; + + gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 { + gn_gnss_uart_nopause { + groups = "gn_gnss_uart_nopause_grp"; + function = "gn_gnss_uart_nopause"; + }; + }; + + gn_gnss_uart_pmx: gn_gnss_uart@0 { + gn_gnss_uart { + groups = "gn_gnss_uart_grp"; + function = "gn_gnss_uart"; + }; + }; + + gn_trg_spi_pmx0: gn_trg_spi@0 { + gn_trg_spi_0 { + groups = "gn_trg_spi_grp0"; + function = "gn_trg_spi_m0"; + }; + }; + + gn_trg_spi_pmx1: gn_trg_spi@1 { + gn_trg_spi_1 { + groups = "gn_trg_spi_grp1"; + function = "gn_trg_spi_m1"; + }; + }; + + cvbs_dbg_pmx: cvbs_dbg@0 { + cvbs_dbg { + groups = "cvbs_dbg_grp"; + function = "cvbs_dbg"; + }; + }; + + cvbs_dbg_test_pmx0: cvbs_dbg_test@0 { + cvbs_dbg_test_0 { + groups = "cvbs_dbg_test_grp0"; + function = "cvbs_dbg_test_m0"; + }; + }; + + cvbs_dbg_test_pmx1: cvbs_dbg_test@1 { + cvbs_dbg_test_1 { + groups = "cvbs_dbg_test_grp1"; + function = "cvbs_dbg_test_m1"; + }; + }; + + cvbs_dbg_test_pmx2: cvbs_dbg_test@2 { + cvbs_dbg_test_2 { + groups = "cvbs_dbg_test_grp2"; + 
function = "cvbs_dbg_test_m2"; + }; + }; + + cvbs_dbg_test_pmx3: cvbs_dbg_test@3 { + cvbs_dbg_test_3 { + groups = "cvbs_dbg_test_grp3"; + function = "cvbs_dbg_test_m3"; + }; + }; + + cvbs_dbg_test_pmx4: cvbs_dbg_test@4 { + cvbs_dbg_test_4 { + groups = "cvbs_dbg_test_grp4"; + function = "cvbs_dbg_test_m4"; + }; + }; + + cvbs_dbg_test_pmx5: cvbs_dbg_test@5 { + cvbs_dbg_test_5 { + groups = "cvbs_dbg_test_grp5"; + function = "cvbs_dbg_test_m5"; + }; + }; + + cvbs_dbg_test_pmx6: cvbs_dbg_test@6 { + cvbs_dbg_test_6 { + groups = "cvbs_dbg_test_grp6"; + function = "cvbs_dbg_test_m6"; + }; + }; + + cvbs_dbg_test_pmx7: cvbs_dbg_test@7 { + cvbs_dbg_test_7 { + groups = "cvbs_dbg_test_grp7"; + function = "cvbs_dbg_test_m7"; + }; + }; + + cvbs_dbg_test_pmx8: cvbs_dbg_test@8 { + cvbs_dbg_test_8 { + groups = "cvbs_dbg_test_grp8"; + function = "cvbs_dbg_test_m8"; + }; + }; + + cvbs_dbg_test_pmx9: cvbs_dbg_test@9 { + cvbs_dbg_test_9 { + groups = "cvbs_dbg_test_grp9"; + function = "cvbs_dbg_test_m9"; + }; + }; + + cvbs_dbg_test_pmx10: cvbs_dbg_test@10 { + cvbs_dbg_test_10 { + groups = "cvbs_dbg_test_grp10"; + function = "cvbs_dbg_test_m10"; + }; + }; + + cvbs_dbg_test_pmx11: cvbs_dbg_test@11 { + cvbs_dbg_test_11 { + groups = "cvbs_dbg_test_grp11"; + function = "cvbs_dbg_test_m11"; + }; + }; + + cvbs_dbg_test_pmx12: cvbs_dbg_test@12 { + cvbs_dbg_test_12 { + groups = "cvbs_dbg_test_grp12"; + function = "cvbs_dbg_test_m12"; + }; + }; + + cvbs_dbg_test_pmx13: cvbs_dbg_test@13 { + cvbs_dbg_test_13 { + groups = "cvbs_dbg_test_grp13"; + function = "cvbs_dbg_test_m13"; + }; + }; + + cvbs_dbg_test_pmx14: cvbs_dbg_test@14 { + cvbs_dbg_test_14 { + groups = "cvbs_dbg_test_grp14"; + function = "cvbs_dbg_test_m14"; + }; + }; + + cvbs_dbg_test_pmx15: cvbs_dbg_test@15 { + cvbs_dbg_test_15 { + groups = "cvbs_dbg_test_grp15"; + function = "cvbs_dbg_test_m15"; + }; + }; + + gn_gnss_power_pmx: gn_gnss_power@0 { + gn_gnss_power { + groups = "gn_gnss_power_grp"; + function = "gn_gnss_power"; + }; + }; + + gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 { + gn_gnss_sw_status { + groups = "gn_gnss_sw_status_grp"; + function = "gn_gnss_sw_status"; + }; + }; + + gn_gnss_eclk_pmx: gn_gnss_eclk@0 { + gn_gnss_eclk { + groups = "gn_gnss_eclk_grp"; + function = "gn_gnss_eclk"; + }; + }; + + gn_gnss_irq1_pmx0: gn_gnss_irq1@0 { + gn_gnss_irq1_0 { + groups = "gn_gnss_irq1_grp0"; + function = "gn_gnss_irq1_m0"; + }; + }; + + gn_gnss_irq2_pmx0: gn_gnss_irq2@0 { + gn_gnss_irq2_0 { + groups = "gn_gnss_irq2_grp0"; + function = "gn_gnss_irq2_m0"; + }; + }; + + gn_gnss_tm_pmx: gn_gnss_tm@0 { + gn_gnss_tm { + groups = "gn_gnss_tm_grp"; + function = "gn_gnss_tm"; + }; + }; + + gn_gnss_tsync_pmx: gn_gnss_tsync@0 { + gn_gnss_tsync { + groups = "gn_gnss_tsync_grp"; + function = "gn_gnss_tsync"; + }; + }; + + gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 { + gn_io_gnsssys_sw_cfg { + groups = "gn_io_gnsssys_sw_cfg_grp"; + function = "gn_io_gnsssys_sw_cfg"; + }; + }; + + gn_trg_pmx0: gn_trg@0 { + gn_trg_0 { + groups = "gn_trg_grp0"; + function = "gn_trg_m0"; + }; + }; + + gn_trg_pmx1: gn_trg@1 { + gn_trg_1 { + groups = "gn_trg_grp1"; + function = "gn_trg_m1"; + }; + }; + + gn_trg_shutdown_pmx0: gn_trg_shutdown@0 { + gn_trg_shutdown_0 { + groups = "gn_trg_shutdown_grp0"; + function = "gn_trg_shutdown_m0"; + }; + }; + + gn_trg_shutdown_pmx1: gn_trg_shutdown@1 { + gn_trg_shutdown_1 { + groups = "gn_trg_shutdown_grp1"; + function = "gn_trg_shutdown_m1"; + }; + }; + + gn_trg_shutdown_pmx2: gn_trg_shutdown@2 { + gn_trg_shutdown_2 { + groups = 
"gn_trg_shutdown_grp2"; + function = "gn_trg_shutdown_m2"; + }; + }; + + gn_trg_shutdown_pmx3: gn_trg_shutdown@3 { + gn_trg_shutdown_3 { + groups = "gn_trg_shutdown_grp3"; + function = "gn_trg_shutdown_m3"; + }; + }; + + i2c0_pmx: i2c0@0 { + i2c0 { + groups = "i2c0_grp"; + function = "i2c0"; + }; + }; + + i2c1_pmx: i2c1@0 { + i2c1 { + groups = "i2c1_grp"; + function = "i2c1"; + }; + }; + + jtag_pmx0: jtag@0 { + jtag_0 { + groups = "jtag_grp0"; + function = "jtag_m0"; + }; + }; + + ks_kas_spi_pmx0: ks_kas_spi@0 { + ks_kas_spi_0 { + groups = "ks_kas_spi_grp0"; + function = "ks_kas_spi_m0"; + }; + }; + + ld_ldd_pmx: ld_ldd@0 { + ld_ldd { + groups = "ld_ldd_grp"; + function = "ld_ldd"; + }; + }; + + ld_ldd_16bit_pmx: ld_ldd_16bit@0 { + ld_ldd_16bit { + groups = "ld_ldd_16bit_grp"; + function = "ld_ldd_16bit"; + }; + }; + + ld_ldd_fck_pmx: ld_ldd_fck@0 { + ld_ldd_fck { + groups = "ld_ldd_fck_grp"; + function = "ld_ldd_fck"; + }; + }; + + ld_ldd_lck_pmx: ld_ldd_lck@0 { + ld_ldd_lck { + groups = "ld_ldd_lck_grp"; + function = "ld_ldd_lck"; + }; + }; + + lr_lcdrom_pmx: lr_lcdrom@0 { + lr_lcdrom { + groups = "lr_lcdrom_grp"; + function = "lr_lcdrom"; + }; + }; + + lvds_analog_pmx: lvds_analog@0 { + lvds_analog { + groups = "lvds_analog_grp"; + function = "lvds_analog"; + }; + }; + + nd_df_pmx: nd_df@0 { + nd_df { + groups = "nd_df_grp"; + function = "nd_df"; + }; + }; + + nd_df_nowp_pmx: nd_df_nowp@0 { + nd_df_nowp { + groups = "nd_df_nowp_grp"; + function = "nd_df_nowp"; + }; + }; + + ps_pmx: ps@0 { + ps { + groups = "ps_grp"; + function = "ps"; + }; + }; + + pwc_core_on_pmx: pwc_core_on@0 { + pwc_core_on { + groups = "pwc_core_on_grp"; + function = "pwc_core_on"; + }; + }; + + pwc_ext_on_pmx: pwc_ext_on@0 { + pwc_ext_on { + groups = "pwc_ext_on_grp"; + function = "pwc_ext_on"; + }; + }; + + pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 { + pwc_gpio3_clk { + groups = "pwc_gpio3_clk_grp"; + function = "pwc_gpio3_clk"; + }; + }; + + pwc_io_on_pmx: pwc_io_on@0 { + pwc_io_on { + groups = "pwc_io_on_grp"; + function = "pwc_io_on"; + }; + }; + + pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 { + pwc_lowbatt_b_0 { + groups = "pwc_lowbatt_b_grp0"; + function = "pwc_lowbatt_b_m0"; + }; + }; + + pwc_mem_on_pmx: pwc_mem_on@0 { + pwc_mem_on { + groups = "pwc_mem_on_grp"; + function = "pwc_mem_on"; + }; + }; + + pwc_on_key_b_pmx0: pwc_on_key_b@0 { + pwc_on_key_b_0 { + groups = "pwc_on_key_b_grp0"; + function = "pwc_on_key_b_m0"; + }; + }; + + pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 { + pwc_wakeup_src0 { + groups = "pwc_wakeup_src0_grp"; + function = "pwc_wakeup_src0"; + }; + }; + + pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 { + pwc_wakeup_src1 { + groups = "pwc_wakeup_src1_grp"; + function = "pwc_wakeup_src1"; + }; + }; + + pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 { + pwc_wakeup_src2 { + groups = "pwc_wakeup_src2_grp"; + function = "pwc_wakeup_src2"; + }; + }; + + pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 { + pwc_wakeup_src3 { + groups = "pwc_wakeup_src3_grp"; + function = "pwc_wakeup_src3"; + }; + }; + + pw_cko0_pmx0: pw_cko0@0 { + pw_cko0_0 { + groups = "pw_cko0_grp0"; + function = "pw_cko0_m0"; + }; + }; + + pw_cko0_pmx1: pw_cko0@1 { + pw_cko0_1 { + groups = "pw_cko0_grp1"; + function = "pw_cko0_m1"; + }; + }; + + pw_cko0_pmx2: pw_cko0@2 { + pw_cko0_2 { + groups = "pw_cko0_grp2"; + function = "pw_cko0_m2"; + }; + }; + + pw_cko1_pmx0: pw_cko1@0 { + pw_cko1_0 { + groups = "pw_cko1_grp0"; + function = "pw_cko1_m0"; + }; + }; + + pw_cko1_pmx1: pw_cko1@1 { + pw_cko1_1 { + groups = "pw_cko1_grp1"; + function = "pw_cko1_m1"; + }; + }; + + 
pw_i2s01_clk_pmx0: pw_i2s01_clk@0 { + pw_i2s01_clk_0 { + groups = "pw_i2s01_clk_grp0"; + function = "pw_i2s01_clk_m0"; + }; + }; + + pw_i2s01_clk_pmx1: pw_i2s01_clk@1 { + pw_i2s01_clk_1 { + groups = "pw_i2s01_clk_grp1"; + function = "pw_i2s01_clk_m1"; + }; + }; + + pw_pwm0_pmx: pw_pwm0@0 { + pw_pwm0 { + groups = "pw_pwm0_grp"; + function = "pw_pwm0"; + }; + }; + + pw_pwm1_pmx: pw_pwm1@0 { + pw_pwm1 { + groups = "pw_pwm1_grp"; + function = "pw_pwm1"; + }; + }; + + pw_pwm2_pmx0: pw_pwm2@0 { + pw_pwm2_0 { + groups = "pw_pwm2_grp0"; + function = "pw_pwm2_m0"; + }; + }; + + pw_pwm2_pmx1: pw_pwm2@1 { + pw_pwm2_1 { + groups = "pw_pwm2_grp1"; + function = "pw_pwm2_m1"; + }; + }; + + pw_pwm3_pmx0: pw_pwm3@0 { + pw_pwm3_0 { + groups = "pw_pwm3_grp0"; + function = "pw_pwm3_m0"; + }; + }; + + pw_pwm3_pmx1: pw_pwm3@1 { + pw_pwm3_1 { + groups = "pw_pwm3_grp1"; + function = "pw_pwm3_m1"; + }; + }; + + pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 { + pw_pwm_cpu_vol_0 { + groups = "pw_pwm_cpu_vol_grp0"; + function = "pw_pwm_cpu_vol_m0"; + }; + }; + + pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 { + pw_pwm_cpu_vol_1 { + groups = "pw_pwm_cpu_vol_grp1"; + function = "pw_pwm_cpu_vol_m1"; + }; + }; + + pw_backlight_pmx0: pw_backlight@0 { + pw_backlight_0 { + groups = "pw_backlight_grp0"; + function = "pw_backlight_m0"; + }; + }; + + pw_backlight_pmx1: pw_backlight@1 { + pw_backlight_1 { + groups = "pw_backlight_grp1"; + function = "pw_backlight_m1"; + }; + }; + + rg_eth_mac_pmx: rg_eth_mac@0 { + rg_eth_mac { + groups = "rg_eth_mac_grp"; + function = "rg_eth_mac"; + }; + }; + + rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 { + rg_gmac_phy_intr_n { + groups = "rg_gmac_phy_intr_n_grp"; + function = "rg_gmac_phy_intr_n"; + }; + }; + + rg_rgmii_mac_pmx: rg_rgmii_mac@0 { + rg_rgmii_mac { + groups = "rg_rgmii_mac_grp"; + function = "rg_rgmii_mac"; + }; + }; + + rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 { + rg_rgmii_phy_ref_clk_0 { + groups = + "rg_rgmii_phy_ref_clk_grp0"; + function = + "rg_rgmii_phy_ref_clk_m0"; + }; + }; + + rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 { + rg_rgmii_phy_ref_clk_1 { + groups = + "rg_rgmii_phy_ref_clk_grp1"; + function = + "rg_rgmii_phy_ref_clk_m1"; + }; + }; + + sd0_pmx: sd0@0 { + sd0 { + groups = "sd0_grp"; + function = "sd0"; + }; + }; + + sd0_4bit_pmx: sd0_4bit@0 { + sd0_4bit { + groups = "sd0_4bit_grp"; + function = "sd0_4bit"; + }; + }; + + sd1_pmx: sd1@0 { + sd1 { + groups = "sd1_grp"; + function = "sd1"; + }; + }; + + sd1_4bit_pmx0: sd1_4bit@0 { + sd1_4bit_0 { + groups = "sd1_4bit_grp0"; + function = "sd1_4bit_m0"; + }; + }; + + sd1_4bit_pmx1: sd1_4bit@1 { + sd1_4bit_1 { + groups = "sd1_4bit_grp1"; + function = "sd1_4bit_m1"; + }; + }; + + sd2_pmx0: sd2@0 { + sd2_0 { + groups = "sd2_grp0"; + function = "sd2_m0"; + }; + }; + + sd2_no_cdb_pmx0: sd2_no_cdb@0 { + sd2_no_cdb_0 { + groups = "sd2_no_cdb_grp0"; + function = "sd2_no_cdb_m0"; + }; + }; + + sd3_pmx: sd3@0 { + sd3 { + groups = "sd3_grp"; + function = "sd3"; + }; + }; + + sd5_pmx: sd5@0 { + sd5 { + groups = "sd5_grp"; + function = "sd5"; + }; + }; + + sd6_pmx0: sd6@0 { + sd6_0 { + groups = "sd6_grp0"; + function = "sd6_m0"; + }; + }; + + sd6_pmx1: sd6@1 { + sd6_1 { + groups = "sd6_grp1"; + function = "sd6_m1"; + }; + }; + + sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 { + sp0_ext_ldo_on { + groups = "sp0_ext_ldo_on_grp"; + function = "sp0_ext_ldo_on"; + }; + }; + + sp0_qspi_pmx: sp0_qspi@0 { + sp0_qspi { + groups = "sp0_qspi_grp"; + function = "sp0_qspi"; + }; + }; + + sp1_spi_pmx: sp1_spi@0 { + sp1_spi { + groups = "sp1_spi_grp"; + 
function = "sp1_spi"; + }; + }; + + tpiu_trace_pmx: tpiu_trace@0 { + tpiu_trace { + groups = "tpiu_trace_grp"; + function = "tpiu_trace"; + }; + }; + + uart0_pmx: uart0@0 { + uart0 { + groups = "uart0_grp"; + function = "uart0"; + }; + }; + + uart0_nopause_pmx: uart0_nopause@0 { + uart0_nopause { + groups = "uart0_nopause_grp"; + function = "uart0_nopause"; + }; + }; + + uart1_pmx: uart1@0 { + uart1 { + groups = "uart1_grp"; + function = "uart1"; + }; + }; + + uart2_pmx: uart2@0 { + uart2 { + groups = "uart2_grp"; + function = "uart2"; + }; + }; + + uart3_pmx0: uart3@0 { + uart3_0 { + groups = "uart3_grp0"; + function = "uart3_m0"; + }; + }; + + uart3_pmx1: uart3@1 { + uart3_1 { + groups = "uart3_grp1"; + function = "uart3_m1"; + }; + }; + + uart3_pmx2: uart3@2 { + uart3_2 { + groups = "uart3_grp2"; + function = "uart3_m2"; + }; + }; + + uart3_pmx3: uart3@3 { + uart3_3 { + groups = "uart3_grp3"; + function = "uart3_m3"; + }; + }; + + uart3_nopause_pmx0: uart3_nopause@0 { + uart3_nopause_0 { + groups = "uart3_nopause_grp0"; + function = "uart3_nopause_m0"; + }; + }; + + uart3_nopause_pmx1: uart3_nopause@1 { + uart3_nopause_1 { + groups = "uart3_nopause_grp1"; + function = "uart3_nopause_m1"; + }; + }; + + uart4_pmx0: uart4@0 { + uart4_0 { + groups = "uart4_grp0"; + function = "uart4_m0"; + }; + }; + + uart4_pmx1: uart4@1 { + uart4_1 { + groups = "uart4_grp1"; + function = "uart4_m1"; + }; + }; + + uart4_pmx2: uart4@2 { + uart4_2 { + groups = "uart4_grp2"; + function = "uart4_m2"; + }; + }; + + uart4_nopause_pmx: uart4_nopause@0 { + uart4_nopause { + groups = "uart4_nopause_grp"; + function = "uart4_nopause"; + }; + }; + + usb0_drvvbus_pmx: usb0_drvvbus@0 { + usb0_drvvbus { + groups = "usb0_drvvbus_grp"; + function = "usb0_drvvbus"; + }; + }; + + usb1_drvvbus_pmx: usb1_drvvbus@0 { + usb1_drvvbus { + groups = "usb1_drvvbus_grp"; + function = "usb1_drvvbus"; + }; + }; + + visbus_dout_pmx: visbus_dout@0 { + visbus_dout { + groups = "visbus_dout_grp"; + function = "visbus_dout"; + }; + }; + + vi_vip1_pmx: vi_vip1@0 { + vi_vip1 { + groups = "vi_vip1_grp"; + function = "vi_vip1"; + }; + }; + + vi_vip1_ext_pmx: vi_vip1_ext@0 { + vi_vip1_ext { + groups = "vi_vip1_ext_grp"; + function = "vi_vip1_ext"; + }; + }; + + vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 { + vi_vip1_low8bit { + groups = "vi_vip1_low8bit_grp"; + function = "vi_vip1_low8bit"; + }; + }; + + vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 { + vi_vip1_high8bit { + groups = "vi_vip1_high8bit_grp"; + function = "vi_vip1_high8bit"; + }; + }; }; pmipc { @@ -356,6 +1375,12 @@ clock-names = "gpio0_io"; gpio-controller; interrupt-controller; + + gpio-banks = <2>; + gpio-ranges = <&pinctrl 0 0 0>, + <&pinctrl 32 0 0>; + gpio-ranges-group-names = "lvds_gpio_grp", + "uart_nand_gpio_grp"; }; nand@17050000 { @@ -461,11 +1486,22 @@ #interrupt-cells = <2>; compatible = "sirf,atlas7-gpio"; reg = <0x13300000 0x1000>; - interrupts = <0 43 0>, <0 44 0>, <0 45 0>; + interrupts = <0 43 0>, <0 44 0>, + <0 45 0>, <0 46 0>; clocks = <&car 84>; clock-names = "gpio1_io"; gpio-controller; interrupt-controller; + + gpio-banks = <4>; + gpio-ranges = <&pinctrl 0 0 0>, + <&pinctrl 32 0 0>, + <&pinctrl 64 0 0>, + <&pinctrl 96 0 0>; + gpio-ranges-group-names = "gnss_gpio_grp", + "lcd_vip_gpio_grp", + "sdio_i2s_gpio_grp", + "sp_rgmii_gpio_grp"; }; sd2: sdhci@14200000 { @@ -744,6 +1780,10 @@ interrupts = <0 47 0>; gpio-controller; interrupt-controller; + + gpio-banks = <1>; + gpio-ranges = <&pinctrl 0 0 0>; + gpio-ranges-group-names = "rtc_gpio_grp"; }; rtc-iobg@18840000 { diff 
--git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index aa465904f6cc..096f68be99e2 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -686,7 +686,8 @@ &dcan1 { status = "ok"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&dcan1_pins_default>; + pinctrl-names = "default", "sleep", "active"; + pinctrl-0 = <&dcan1_pins_sleep>; pinctrl-1 = <&dcan1_pins_sleep>; + pinctrl-2 = <&dcan1_pins_default>; }; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index 4e1b60581782..803738414086 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -587,9 +587,10 @@ &dcan1 { status = "ok"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&dcan1_pins_default>; + pinctrl-names = "default", "sleep", "active"; + pinctrl-0 = <&dcan1_pins_sleep>; pinctrl-1 = <&dcan1_pins_sleep>; + pinctrl-2 = <&dcan1_pins_default>; }; &qspi { diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 5ab7548e04e1..9e2444b07bce 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1321,7 +1321,7 @@ usb0: gadget@00500000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,sama5d3-udc"; reg = <0x00500000 0x100000 0xf8030000 0x4000>; interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 653a1f851f2b..3ee22ee13c5a 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -127,7 +127,7 @@ usb0: gadget@00400000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,sama5d3-udc"; reg = <0x00400000 0x100000 0xfc02c000 0x4000>; interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 107395c32d82..17f63f7dfd9e 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -150,6 +150,16 @@ interface-type = "ace"; reg = <0x5000 0x1000>; }; + + pmu@9000 { + compatible = "arm,cci-400-pmu,r0"; + reg = <0x9000 0x5000>; + interrupts = <0 105 4>, + <0 101 4>, + <0 102 4>, + <0 103 4>, + <0 104 4>; + }; }; memory-controller@7ffd0000 { @@ -187,11 +197,22 @@ <1 10 0xf08>; }; - pmu { + pmu_a15 { compatible = "arm,cortex-a15-pmu"; interrupts = <0 68 4>, <0 69 4>; - interrupt-affinity = <&cpu0>, <&cpu1>; + interrupt-affinity = <&cpu0>, + <&cpu1>; + }; + + pmu_a7 { + compatible = "arm,cortex-a7-pmu"; + interrupts = <0 128 4>, + <0 129 4>, + <0 130 4>; + interrupt-affinity = <&cpu2>, + <&cpu3>, + <&cpu4>; }; oscclk6a: oscclk6a { diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index fd6a6d23bc20..5fd8df6f50ea 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -169,6 +169,7 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ATMEL=y +CONFIG_MTD_NAND_BRCMNAND=y CONFIG_MTD_NAND_DAVINCI=y CONFIG_MTD_SPI_NOR=y CONFIG_MTD_UBI=y @@ -352,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y CONFIG_POWER_RESET_GPIO=y CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_POWER_RESET_KEYSTONE=y -CONFIG_POWER_RESET_SUN6I=y CONFIG_POWER_RESET_RMOBILE=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 8ecba00dcd83..7ebc346bf9fa 100644 --- a/arch/arm/configs/sunxi_defconfig +++ 
b/arch/arm/configs/sunxi_defconfig @@ -2,6 +2,7 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_PERF_EVENTS=y +CONFIG_MODULES=y CONFIG_ARCH_SUNXI=y CONFIG_SMP=y CONFIG_NR_CPUS=8 @@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_SUN6I=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y CONFIG_WATCHDOG=y @@ -87,6 +87,10 @@ CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_GPIO=y +CONFIG_FB=y +CONFIG_FB_SIMPLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 1c3938f26beb..485982084fe9 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) * The _caller variety takes a __builtin_return_address(0) value for * /proc/vmalloc to use - and should only be used in non-inline functions. */ -extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, - size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); - extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); -extern void __arm_iounmap(volatile void __iomem *addr); extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); @@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t); static inline void memset_io(volatile void __iomem *dst, unsigned c, size_t count) { - memset((void __force *)dst, c, count); + extern void mmioset(void *, unsigned int, size_t); + mmioset((void __force *)dst, c, count); } #define memset_io(dst,c,count) memset_io(dst,c,count) static inline void memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - memcpy(to, (const void __force *)from, count); + extern void mmiocpy(void *, const void *, size_t); + mmiocpy(to, (const void __force *)from, count); } #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count) static inline void memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - memcpy((void __force *)to, from, count); + extern void mmiocpy(void *, const void *, size_t); + mmiocpy((void __force *)to, from, count); } #define memcpy_toio(to,from,count) memcpy_toio(to,from,count) @@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from, #endif /* readl */ /* - * ioremap and friends. + * ioremap() and friends. + * + * ioremap() takes a resource address, and size. Due to the ARM memory + * types, it is important to use the correct ioremap() function as each + * mapping has specific properties. 
+ * + * Function Memory type Cacheability Cache hint + * ioremap() Device n/a n/a + * ioremap_nocache() Device n/a n/a + * ioremap_cache() Normal Writeback Read allocate + * ioremap_wc() Normal Non-cacheable n/a + * ioremap_wt() Normal Non-cacheable n/a + * + * All device mappings have the following properties: + * - no access speculation + * - no repetition (eg, on return from an exception) + * - number, order and size of accesses are maintained + * - unaligned accesses are "unpredictable" + * - writes may be delayed before they hit the endpoint device * - * ioremap takes a PCI memory address, as specified in - * Documentation/io-mapping.txt. + * ioremap_nocache() is the same as ioremap() as there are too many device + * drivers using this for device registers, and documentation which tells + * people to use it for such for this to be any different. This is not a + * safe fallback for memory-like mappings, or memory regions where the + * compiler may generate unaligned accesses - eg, via inlining its own + * memcpy. * + * All normal memory mappings have the following properties: + * - reads can be repeated with no side effects + * - repeated reads return the last value written + * - reads can fetch additional locations without side effects + * - writes can be repeated (in certain cases) with no side effects + * - writes can be merged before accessing the target + * - unaligned accesses can be supported + * - ordering is not guaranteed without explicit dependencies or barrier + * instructions + * - writes may be delayed before they hit the endpoint memory + * + * The cache hint is only a performance hint: CPUs may alias these hints. + * Eg, a CPU not implementing read allocate but implementing write allocate + * will provide a write allocate mapping instead. 
*/ -#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) -#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) -#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define iounmap __arm_iounmap +void __iomem *ioremap(resource_size_t res_cookie, size_t size); +#define ioremap ioremap +#define ioremap_nocache ioremap + +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); +#define ioremap_cache ioremap_cache + +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); +#define ioremap_wc ioremap_wc +#define ioremap_wt ioremap_wc + +void iounmap(volatile void __iomem *iomem_cookie); +#define iounmap iounmap /* * io{read,write}{16,32}be() macros diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 3a72d69b3255..6f225acc07c5 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x) */ #define __pa(x) __virt_to_phys((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index bfd662e49a25..aeddd28b3595 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -129,7 +129,36 @@ /* * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B + * ARMv6+ without TEX remapping, they are a table index. 
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B + * + * MT type Pre-ARMv6 ARMv6+ type / cacheable status + * UNCACHED Uncached Strongly ordered + * BUFFERABLE Bufferable Normal memory / non-cacheable + * WRITETHROUGH Writethrough Normal memory / write through + * WRITEBACK Writeback Normal memory / write back, read alloc + * MINICACHE Minicache N/A + * WRITEALLOC Writeback Normal memory / write back, write alloc + * DEV_SHARED Uncached Device memory (shared) + * DEV_NONSHARED Uncached Device memory (non-shared) + * DEV_WC Bufferable Normal memory / non-cacheable + * DEV_CACHED Writeback Normal memory / write back, read alloc + * VECTORS Variable Normal memory / variable + * + * All normal memory mappings have the following properties: + * - reads can be repeated with no side effects + * - repeated reads return the last value written + * - reads can fetch additional locations without side effects + * - writes can be repeated (in certain cases) with no side effects + * - writes can be merged before accessing the target + * - unaligned accesses can be supported + * + * All device mappings have the following properties: + * - no access speculation + * - no repetition (eg, on return from an exception) + * - number, order and size of accesses are maintained + * - unaligned accesses are "unpredictable" */ #define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ #define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index a88671cfe1ff..5e5a51a99e68 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void); extern void fpundefinstr(void); +void mmioset(void *, unsigned int, size_t); +void mmiocpy(void *, const void *, size_t); + /* platform dependent support */ EXPORT_SYMBOL(arm_delay_ops); @@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(__memzero); +EXPORT_SYMBOL(mmioset); +EXPORT_SYMBOL(mmiocpy); + #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 7dac3086e361..cb4fb1e69778 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -410,7 +410,7 @@ ENDPROC(__fiq_abt) zero_fp .if \trace -#ifdef CONFIG_IRQSOFF_TRACER +#ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif ct_user_exit save = 0 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 90dfbedfbfb8..3d6b7821cff8 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) struct pt_regs *old_regs = set_irq_regs(regs); if ((unsigned)ipinr < NR_IPI) { - trace_ipi_entry(ipi_types[ipinr]); + trace_ipi_entry_rcuidle(ipi_types[ipinr]); __inc_irq_stat(cpu, ipi_irqs[ipinr]); } @@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) } if ((unsigned)ipinr < NR_IPI) - trace_ipi_exit(ipi_types[ipinr]); + trace_ipi_exit_rcuidle(ipi_types[ipinr]); set_irq_regs(old_regs); } diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index 7797e81e40e0..64111bd4440b 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S @@ -61,8 +61,10 @@ /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ +ENTRY(mmiocpy) ENTRY(memcpy) #include "copy_template.S" ENDPROC(memcpy) +ENDPROC(mmiocpy) diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index a4ee97b5a2bf..3c65e3bd790f 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -16,6 
+16,7 @@ .text .align 5 +ENTRY(mmioset) ENTRY(memset) UNWIND( .fnstart ) ands r3, r0, #3 @ 1 unaligned? @@ -133,3 +134,4 @@ UNWIND( .fnstart ) b 1b UNWIND( .fnend ) ENDPROC(memset) +ENDPROC(mmioset) diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index e9184feffc4e..0ac9e4b3b265 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -19,7 +19,6 @@ config ARCH_BCM_IPROC select ARCH_REQUIRE_GPIOLIB select ARM_AMBA select PINCTRL - select MTD_NAND_BRCMNAND help This enables support for systems based on Broadcom IPROC architected SoCs. The IPROC complex contains one or more ARM CPUs along with common diff --git a/arch/arm/mach-dove/include/mach/irqs.h b/arch/arm/mach-dove/include/mach/irqs.h index 03d401d20453..3f29e6bca058 100644 --- a/arch/arm/mach-dove/include/mach/irqs.h +++ b/arch/arm/mach-dove/include/mach/irqs.h @@ -14,73 +14,73 @@ /* * Dove Low Interrupt Controller */ -#define IRQ_DOVE_BRIDGE 0 -#define IRQ_DOVE_H2C 1 -#define IRQ_DOVE_C2H 2 -#define IRQ_DOVE_NAND 3 -#define IRQ_DOVE_PDMA 4 -#define IRQ_DOVE_SPI1 5 -#define IRQ_DOVE_SPI0 6 -#define IRQ_DOVE_UART_0 7 -#define IRQ_DOVE_UART_1 8 -#define IRQ_DOVE_UART_2 9 -#define IRQ_DOVE_UART_3 10 -#define IRQ_DOVE_I2C 11 -#define IRQ_DOVE_GPIO_0_7 12 -#define IRQ_DOVE_GPIO_8_15 13 -#define IRQ_DOVE_GPIO_16_23 14 -#define IRQ_DOVE_PCIE0_ERR 15 -#define IRQ_DOVE_PCIE0 16 -#define IRQ_DOVE_PCIE1_ERR 17 -#define IRQ_DOVE_PCIE1 18 -#define IRQ_DOVE_I2S0 19 -#define IRQ_DOVE_I2S0_ERR 20 -#define IRQ_DOVE_I2S1 21 -#define IRQ_DOVE_I2S1_ERR 22 -#define IRQ_DOVE_USB_ERR 23 -#define IRQ_DOVE_USB0 24 -#define IRQ_DOVE_USB1 25 -#define IRQ_DOVE_GE00_RX 26 -#define IRQ_DOVE_GE00_TX 27 -#define IRQ_DOVE_GE00_MISC 28 -#define IRQ_DOVE_GE00_SUM 29 -#define IRQ_DOVE_GE00_ERR 30 -#define IRQ_DOVE_CRYPTO 31 +#define IRQ_DOVE_BRIDGE (1 + 0) +#define IRQ_DOVE_H2C (1 + 1) +#define IRQ_DOVE_C2H (1 + 2) +#define IRQ_DOVE_NAND (1 + 3) +#define IRQ_DOVE_PDMA (1 + 4) +#define IRQ_DOVE_SPI1 (1 + 5) +#define IRQ_DOVE_SPI0 (1 + 6) +#define IRQ_DOVE_UART_0 (1 + 7) +#define IRQ_DOVE_UART_1 (1 + 8) +#define IRQ_DOVE_UART_2 (1 + 9) +#define IRQ_DOVE_UART_3 (1 + 10) +#define IRQ_DOVE_I2C (1 + 11) +#define IRQ_DOVE_GPIO_0_7 (1 + 12) +#define IRQ_DOVE_GPIO_8_15 (1 + 13) +#define IRQ_DOVE_GPIO_16_23 (1 + 14) +#define IRQ_DOVE_PCIE0_ERR (1 + 15) +#define IRQ_DOVE_PCIE0 (1 + 16) +#define IRQ_DOVE_PCIE1_ERR (1 + 17) +#define IRQ_DOVE_PCIE1 (1 + 18) +#define IRQ_DOVE_I2S0 (1 + 19) +#define IRQ_DOVE_I2S0_ERR (1 + 20) +#define IRQ_DOVE_I2S1 (1 + 21) +#define IRQ_DOVE_I2S1_ERR (1 + 22) +#define IRQ_DOVE_USB_ERR (1 + 23) +#define IRQ_DOVE_USB0 (1 + 24) +#define IRQ_DOVE_USB1 (1 + 25) +#define IRQ_DOVE_GE00_RX (1 + 26) +#define IRQ_DOVE_GE00_TX (1 + 27) +#define IRQ_DOVE_GE00_MISC (1 + 28) +#define IRQ_DOVE_GE00_SUM (1 + 29) +#define IRQ_DOVE_GE00_ERR (1 + 30) +#define IRQ_DOVE_CRYPTO (1 + 31) /* * Dove High Interrupt Controller */ -#define IRQ_DOVE_AC97 32 -#define IRQ_DOVE_PMU 33 -#define IRQ_DOVE_CAM 34 -#define IRQ_DOVE_SDIO0 35 -#define IRQ_DOVE_SDIO1 36 -#define IRQ_DOVE_SDIO0_WAKEUP 37 -#define IRQ_DOVE_SDIO1_WAKEUP 38 -#define IRQ_DOVE_XOR_00 39 -#define IRQ_DOVE_XOR_01 40 -#define IRQ_DOVE_XOR0_ERR 41 -#define IRQ_DOVE_XOR_10 42 -#define IRQ_DOVE_XOR_11 43 -#define IRQ_DOVE_XOR1_ERR 44 -#define IRQ_DOVE_LCD_DCON 45 -#define IRQ_DOVE_LCD1 46 -#define IRQ_DOVE_LCD0 47 -#define IRQ_DOVE_GPU 48 -#define IRQ_DOVE_PERFORM_MNTR 49 -#define IRQ_DOVE_VPRO_DMA1 51 -#define IRQ_DOVE_SSP_TIMER 54 -#define IRQ_DOVE_SSP 55 -#define 
IRQ_DOVE_MC_L2_ERR 56 -#define IRQ_DOVE_CRYPTO_ERR 59 -#define IRQ_DOVE_GPIO_24_31 60 -#define IRQ_DOVE_HIGH_GPIO 61 -#define IRQ_DOVE_SATA 62 +#define IRQ_DOVE_AC97 (1 + 32) +#define IRQ_DOVE_PMU (1 + 33) +#define IRQ_DOVE_CAM (1 + 34) +#define IRQ_DOVE_SDIO0 (1 + 35) +#define IRQ_DOVE_SDIO1 (1 + 36) +#define IRQ_DOVE_SDIO0_WAKEUP (1 + 37) +#define IRQ_DOVE_SDIO1_WAKEUP (1 + 38) +#define IRQ_DOVE_XOR_00 (1 + 39) +#define IRQ_DOVE_XOR_01 (1 + 40) +#define IRQ_DOVE_XOR0_ERR (1 + 41) +#define IRQ_DOVE_XOR_10 (1 + 42) +#define IRQ_DOVE_XOR_11 (1 + 43) +#define IRQ_DOVE_XOR1_ERR (1 + 44) +#define IRQ_DOVE_LCD_DCON (1 + 45) +#define IRQ_DOVE_LCD1 (1 + 46) +#define IRQ_DOVE_LCD0 (1 + 47) +#define IRQ_DOVE_GPU (1 + 48) +#define IRQ_DOVE_PERFORM_MNTR (1 + 49) +#define IRQ_DOVE_VPRO_DMA1 (1 + 51) +#define IRQ_DOVE_SSP_TIMER (1 + 54) +#define IRQ_DOVE_SSP (1 + 55) +#define IRQ_DOVE_MC_L2_ERR (1 + 56) +#define IRQ_DOVE_CRYPTO_ERR (1 + 59) +#define IRQ_DOVE_GPIO_24_31 (1 + 60) +#define IRQ_DOVE_HIGH_GPIO (1 + 61) +#define IRQ_DOVE_SATA (1 + 62) /* * DOVE General Purpose Pins */ -#define IRQ_DOVE_GPIO_START 64 +#define IRQ_DOVE_GPIO_START 65 #define NR_GPIO_IRQS 64 /* diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 4a5a7aedcb76..df0223f76fa9 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -126,14 +126,14 @@ __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs) stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF); stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF); if (stat) { - unsigned int hwirq = __fls(stat); + unsigned int hwirq = 1 + __fls(stat); handle_IRQ(hwirq, regs); return; } stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF); stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF); if (stat) { - unsigned int hwirq = 32 + __fls(stat); + unsigned int hwirq = 33 + __fls(stat); handle_IRQ(hwirq, regs); return; } @@ -144,8 +144,8 @@ void __init dove_init_irq(void) { int i; - orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); - orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); + orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); + orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); #ifdef CONFIG_MULTI_IRQ_HANDLER set_handle_irq(dove_legacy_handle_irq); diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c index 6dfd4ab97b2a..301ab38d38ba 100644 --- a/arch/arm/mach-mvebu/pm-board.c +++ b/arch/arm/mach-mvebu/pm-board.c @@ -43,6 +43,9 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd) for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++) ackcmd |= BIT(pic_raw_gpios[i]); + srcmd = cpu_to_le32(srcmd); + ackcmd = cpu_to_le32(ackcmd); + /* * Wait a while, the PIC needs quite a bit of time between the * two GPIO commands. 
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index f1a68c63dc99..903c85be2897 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -274,8 +274,5 @@ obj-y += $(nand-m) $(nand-y) smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o obj-y += $(smsc911x-m) $(smsc911x-y) -ifneq ($(CONFIG_HWSPINLOCK_OMAP),) -obj-y += hwspinlock.o -endif obj-y += common-board-devices.o twl-common.o dss-common.o diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c index e1a56d87599e..1ed4be184a29 100644 --- a/arch/arm/mach-omap2/dma.c +++ b/arch/arm/mach-omap2/dma.c @@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void) u8 revision = dma_read(REVISION, 0) & 0xff; printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", revision >> 4, revision & 0xf); - return; } static unsigned configure_dma_errata(void) diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c deleted file mode 100644 index ef175acaeaa2..000000000000 --- a/arch/arm/mach-omap2/hwspinlock.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * OMAP hardware spinlock device initialization - * - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com - * - * Contact: Simon Que <sque@ti.com> - * Hari Kanigeri <h-kanigeri2@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/hwspinlock.h> - -#include "soc.h" -#include "omap_hwmod.h" -#include "omap_device.h" - -static struct hwspinlock_pdata omap_hwspinlock_pdata __initdata = { - .base_id = 0, -}; - -static int __init hwspinlocks_init(void) -{ - int retval = 0; - struct omap_hwmod *oh; - struct platform_device *pdev; - const char *oh_name = "spinlock"; - const char *dev_name = "omap_hwspinlock"; - - /* - * Hwmod lookup will fail in case our platform doesn't support the - * hardware spinlock module, so it is safe to run this initcall - * on all omaps - */ - oh = omap_hwmod_lookup(oh_name); - if (oh == NULL) - return -EINVAL; - - pdev = omap_device_build(dev_name, 0, oh, &omap_hwspinlock_pdata, - sizeof(struct hwspinlock_pdata)); - if (IS_ERR(pdev)) { - pr_err("Can't build omap_device for %s:%s\n", dev_name, - oh_name); - retval = PTR_ERR(pdev); - } - - return retval; -} -/* early board code might need to reserve specific hwspinlock instances */ -omap_postcore_initcall(hwspinlocks_init); diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig index e03d8b5c9ad0..9ab8932403e5 100644 --- a/arch/arm/mach-prima2/Kconfig +++ b/arch/arm/mach-prima2/Kconfig @@ -4,6 +4,7 @@ menuconfig ARCH_SIRF select ARCH_REQUIRE_GPIOLIB select GENERIC_IRQ_CHIP select NO_IOPORT_MAP + select REGMAP select PINCTRL select PINCTRL_SIRF help diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c index 8f66d8f7ca75..d4852d24dc7d 100644 --- a/arch/arm/mach-prima2/rtciobrg.c +++ b/arch/arm/mach-prima2/rtciobrg.c @@ -1,5 +1,5 @@ /* - * RTC I/O Bridge interfaces for CSR SiRFprimaII + * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module * * 
Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> +#include <linux/regmap.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> @@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr) { unsigned long flags, val; + /* TODO: add hwspinlock to sync with M3 */ spin_lock_irqsave(&rtciobrg_lock, flags); val = __sirfsoc_rtc_iobrg_readl(addr); @@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr) { unsigned long flags; + /* TODO: add hwspinlock to sync with M3 */ spin_lock_irqsave(&rtciobrg_lock, flags); sirfsoc_rtc_iobrg_pre_writel(val, addr); @@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr) } EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel); + +static int regmap_iobg_regwrite(void *context, unsigned int reg, + unsigned int val) +{ + sirfsoc_rtc_iobrg_writel(val, reg); + return 0; +} + +static int regmap_iobg_regread(void *context, unsigned int reg, + unsigned int *val) +{ + *val = (u32)sirfsoc_rtc_iobrg_readl(reg); + return 0; +} + +static struct regmap_bus regmap_iobg = { + .reg_write = regmap_iobg_regwrite, + .reg_read = regmap_iobg_regread, +}; + +/** + * devm_regmap_init_iobg(): Initialise managed register map + * + * @iobg: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +struct regmap *devm_regmap_init_iobg(struct device *dev, + const struct regmap_config *config) +{ + const struct regmap_bus *bus = ®map_iobg; + + return devm_regmap_init(dev, bus, dev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_iobg); + static const struct of_device_id rtciobrg_ids[] = { { .compatible = "sirf,prima2-rtciobg" }, {} @@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void) } postcore_initcall(sirfsoc_rtciobrg_init); -MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, " - "Barry Song <baohua.song@csr.com>"); +MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>"); +MODULE_AUTHOR("Barry Song <baohua.song@csr.com>"); MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge"); MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index 81502b90dd91..4efe2d43a126 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig @@ -35,7 +35,7 @@ config MACH_SUN7I select SUN5I_HSTIMER config MACH_SUN8I - bool "Allwinner A23 (sun8i) SoCs support" + bool "Allwinner sun8i Family SoCs support" default ARCH_SUNXI select ARM_GIC select MFD_SUN6I_PRCM diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 1bc811a74a9f..65bab2876343 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -67,10 +67,13 @@ MACHINE_END static const char * const sun8i_board_dt_compat[] = { "allwinner,sun8i-a23", + "allwinner,sun8i-a33", + "allwinner,sun8i-h3", NULL, }; -DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family") +DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family") + .init_time = sun6i_timer_init, .dt_compat = sun8i_board_dt_compat, .init_late = sunxi_dt_cpufreq_init, MACHINE_END diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index d1e5ad7ab3bc..0c81056c1dd7 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, } #endif -void __iomem * 
__arm_ioremap_pfn_caller(unsigned long pfn, +static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; @@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype) { return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, - __builtin_return_address(0)); + __builtin_return_address(0)); } EXPORT_SYMBOL(__arm_ioremap_pfn); @@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *) = __arm_ioremap_caller; -void __iomem * -__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) +void __iomem *ioremap(resource_size_t res_cookie, size_t size) +{ + return arch_ioremap_caller(res_cookie, size, MT_DEVICE, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap); + +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) +{ + return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); + +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) { - return arch_ioremap_caller(phys_addr, size, mtype, - __builtin_return_address(0)); + return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC, + __builtin_return_address(0)); } -EXPORT_SYMBOL(__arm_ioremap); +EXPORT_SYMBOL(ioremap_wc); /* * Remap an arbitrary physical address space into the kernel virtual @@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr) void (*arch_iounmap)(volatile void __iomem *) = __iounmap; -void __arm_iounmap(volatile void __iomem *io_addr) +void iounmap(volatile void __iomem *cookie) { - arch_iounmap(io_addr); + arch_iounmap(cookie); } -EXPORT_SYMBOL(__arm_iounmap); +EXPORT_SYMBOL(iounmap); #ifdef CONFIG_PCI static int pci_ioremap_mem_type = MT_DEVICE; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6ca7d9aa896f..870838a46d52 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void) int highmem = 0; phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; struct memblock_region *reg; + bool should_use_highmem = false; for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; @@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void) pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", &block_start, &block_end); memblock_remove(reg->base, reg->size); + should_use_highmem = true; continue; } @@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void) &block_start, &block_end, &vmalloc_limit); memblock_remove(vmalloc_limit, overlap_size); block_end = vmalloc_limit; + should_use_highmem = true; } } @@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void) } } + if (should_use_highmem) + pr_notice("Consider using a HIGHMEM enabled kernel.\n"); + high_memory = __va(arm_lowmem_limit - 1) + 1; /* @@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc) build_mem_type_table(); prepare_page_table(); map_lowmem(); + memblock_set_current_limit(arm_lowmem_limit); dma_contiguous_remap(); devicemaps_init(mdesc); kmap_init(); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index afd7e05d95f1..1dd10936d68d 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, } EXPORT_SYMBOL(__arm_ioremap_pfn); -void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, - size_t size, unsigned int mtype, 
void *caller) +void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, + unsigned int mtype, void *caller) { - return __arm_ioremap_pfn(pfn, offset, size, mtype); + return (void __iomem *)phys_addr; } -void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, - unsigned int mtype) +void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); + +void __iomem *ioremap(resource_size_t res_cookie, size_t size) { - return (void __iomem *)phys_addr; + return __arm_ioremap_caller(res_cookie, size, MT_DEVICE, + __builtin_return_address(0)); } -EXPORT_SYMBOL(__arm_ioremap); +EXPORT_SYMBOL(ioremap); -void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) +{ + return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); -void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, - unsigned int mtype, void *caller) +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) +{ + return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_wc); + +void __iounmap(volatile void __iomem *addr) { - return __arm_ioremap(phys_addr, size, mtype); } +EXPORT_SYMBOL(__iounmap); void (*arch_iounmap)(volatile void __iomem *); -void __arm_iounmap(volatile void __iomem *addr) +void iounmap(volatile void __iomem *addr) { } -EXPORT_SYMBOL(__arm_iounmap); +EXPORT_SYMBOL(iounmap); diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c index 9005b07296c8..aedec81d1198 100644 --- a/arch/arm/vdso/vdsomunge.c +++ b/arch/arm/vdso/vdsomunge.c @@ -45,13 +45,11 @@ * it does. */ -#define _GNU_SOURCE - #include <byteswap.h> #include <elf.h> #include <errno.h> -#include <error.h> #include <fcntl.h> +#include <stdarg.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> @@ -82,11 +80,25 @@ #define EF_ARM_ABI_FLOAT_HARD 0x400 #endif +static int failed; +static const char *argv0; static const char *outfile; +static void fail(const char *fmt, ...) 
+{ + va_list ap; + + failed = 1; + fprintf(stderr, "%s: ", argv0); + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + exit(EXIT_FAILURE); +} + static void cleanup(void) { - if (error_message_count > 0 && outfile != NULL) + if (failed && outfile != NULL) unlink(outfile); } @@ -119,68 +131,66 @@ int main(int argc, char **argv) int infd; atexit(cleanup); + argv0 = argv[0]; if (argc != 3) - error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]); + fail("Usage: %s [infile] [outfile]\n", argv[0]); infile = argv[1]; outfile = argv[2]; infd = open(infile, O_RDONLY); if (infd < 0) - error(EXIT_FAILURE, errno, "Cannot open %s", infile); + fail("Cannot open %s: %s\n", infile, strerror(errno)); if (fstat(infd, &stat) != 0) - error(EXIT_FAILURE, errno, "Failed stat for %s", infile); + fail("Failed stat for %s: %s\n", infile, strerror(errno)); inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0); if (inbuf == MAP_FAILED) - error(EXIT_FAILURE, errno, "Failed to map %s", infile); + fail("Failed to map %s: %s\n", infile, strerror(errno)); close(infd); inhdr = inbuf; if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0) - error(EXIT_FAILURE, 0, "Not an ELF file"); + fail("Not an ELF file\n"); if (inhdr->e_ident[EI_CLASS] != ELFCLASS32) - error(EXIT_FAILURE, 0, "Unsupported ELF class"); + fail("Unsupported ELF class\n"); swap = inhdr->e_ident[EI_DATA] != HOST_ORDER; if (read_elf_half(inhdr->e_type, swap) != ET_DYN) - error(EXIT_FAILURE, 0, "Not a shared object"); + fail("Not a shared object\n"); - if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) { - error(EXIT_FAILURE, 0, "Unsupported architecture %#x", - inhdr->e_machine); - } + if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) + fail("Unsupported architecture %#x\n", inhdr->e_machine); e_flags = read_elf_word(inhdr->e_flags, swap); if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) { - error(EXIT_FAILURE, 0, "Unsupported EABI version %#x", - EF_ARM_EABI_VERSION(e_flags)); + fail("Unsupported EABI version %#x\n", + EF_ARM_EABI_VERSION(e_flags)); } if (e_flags & EF_ARM_ABI_FLOAT_HARD) - error(EXIT_FAILURE, 0, - "Unexpected hard-float flag set in e_flags"); + fail("Unexpected hard-float flag set in e_flags\n"); clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT); outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); if (outfd < 0) - error(EXIT_FAILURE, errno, "Cannot open %s", outfile); + fail("Cannot open %s: %s\n", outfile, strerror(errno)); if (ftruncate(outfd, stat.st_size) != 0) - error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile); + fail("Cannot truncate %s: %s\n", outfile, strerror(errno)); outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, outfd, 0); if (outbuf == MAP_FAILED) - error(EXIT_FAILURE, errno, "Failed to map %s", outfile); + fail("Failed to map %s: %s\n", outfile, strerror(errno)); close(outfd); @@ -195,7 +205,7 @@ int main(int argc, char **argv) } if (msync(outbuf, stat.st_size, MS_SYNC) != 0) - error(EXIT_FAILURE, errno, "Failed to sync %s", outfile); + fail("Failed to sync %s: %s\n", outfile, strerror(errno)); return EXIT_SUCCESS; } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0f6edb14b7e4..318175f62c24 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -23,9 +23,9 @@ config ARM64 select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select EDAC_SUPPORT select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS + select EDAC_SUPPORT select GENERIC_ALLOCATOR select GENERIC_CLOCKEVENTS select 
GENERIC_CLOCKEVENTS_BROADCAST if SMP diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts index 83578e766b94..4c55833d8a41 100644 --- a/arch/arm64/boot/dts/apm/apm-mustang.dts +++ b/arch/arm64/boot/dts/apm/apm-mustang.dts @@ -23,6 +23,16 @@ device_type = "memory"; reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ }; + + gpio-keys { + compatible = "gpio-keys"; + button@1 { + label = "POWER"; + linux,code = <116>; + linux,input-type = <0x1>; + interrupts = <0x0 0x2d 0x1>; + }; + }; }; &pcie0clk { diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0bb287ca0a98..0689c3fb56e3 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -717,6 +717,19 @@ phy-names = "sata-phy"; }; + sbgpio: sbgpio@17001000{ + compatible = "apm,xgene-gpio-sb"; + reg = <0x0 0x17001000 0x0 0x400>; + #gpio-cells = <2>; + gpio-controller; + interrupts = <0x0 0x28 0x1>, + <0x0 0x29 0x1>, + <0x0 0x2a 0x1>, + <0x0 0x2b 0x1>, + <0x0 0x2c 0x1>, + <0x0 0x2d 0x1>; + }; + rtc: rtc@10510000 { compatible = "apm,xgene-rtc"; reg = <0x0 0x10510000 0x0 0x400>; diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile index c5c98b91514e..bb3c07209676 100644 --- a/arch/arm64/boot/dts/arm/Makefile +++ b/arch/arm64/boot/dts/arm/Makefile @@ -1,6 +1,7 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb +dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts new file mode 100644 index 000000000000..5b1d0181023b --- /dev/null +++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts @@ -0,0 +1,191 @@ +/* + * ARM Ltd. 
Versatile Express + * + * LogicTile Express 20MG + * V2F-1XV7 + * + * Cortex-A53 (2 cores) Soft Macrocell Model + * + * HBI-0247C + */ + +/dts-v1/; + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +/ { + model = "V2F-1XV7 Cortex-A53x2 SMM"; + arm,hbi = <0x247>; + arm,vexpress,site = <0xf>; + compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + stdout-path = "serial0:38400n8"; + }; + + aliases { + serial0 = &v2m_serial0; + serial1 = &v2m_serial1; + serial2 = &v2m_serial2; + serial3 = &v2m_serial3; + i2c0 = &v2m_i2c_dvi; + i2c1 = &v2m_i2c_pcie; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 0>; + next-level-cache = <&L2_0>; + }; + + cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 1>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; + }; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */ + }; + + gic: interrupt-controller@2c001000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0 0x2c001000 0 0x1000>, + <0 0x2c002000 0 0x2000>, + <0 0x2c004000 0 0x2000>, + <0 0x2c006000 0 0x2000>; + interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, + <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, + <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, + <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; + }; + + dcc { + compatible = "arm,vexpress,config-bus"; + arm,vexpress,config-bridge = <&v2m_sysreg>; + + smbclk: osc@4 { + /* SMC clock */ + compatible = "arm,vexpress-osc"; + arm,vexpress-sysreg,func = <1 4>; + freq-range = <40000000 40000000>; + #clock-cells = <0>; + clock-output-names = "smclk"; + }; + + volt@0 { + /* VIO to expansion board above */ + compatible = "arm,vexpress-volt"; + arm,vexpress-sysreg,func = <2 0>; + regulator-name = "VIO_UP"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + volt@1 { + /* 12V from power connector J6 */ + compatible = "arm,vexpress-volt"; + arm,vexpress-sysreg,func = <2 1>; + regulator-name = "12"; + regulator-always-on; + }; + + temp@0 { + /* FPGA temperature */ + compatible = "arm,vexpress-temp"; + arm,vexpress-sysreg,func = <4 0>; + label = "FPGA"; + }; + }; + + smb { + compatible = "simple-bus"; + + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0 0x08000000 0x04000000>, + <1 0 0 0x14000000 0x04000000>, + <2 0 0 0x18000000 0x04000000>, + <3 0 0 0x1c000000 0x04000000>, + <4 0 0 0x0c000000 0x04000000>, + <5 0 0 0x10000000 0x04000000>; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 63>; + interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic GIC_SPI 7 
IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; + + /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi" + }; +}; diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi index d8c0bdc51882..9cb7cf94284a 100644 --- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi @@ -376,10 +376,19 @@ gic0: interrupt-controller@8010,00000000 { compatible = "arm,gic-v3"; #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; interrupt-controller; reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */ <0x8010 0x80000000 0x0 0x600000>; /* GICR */ interrupts = <1 9 0xf04>; + + its: gic-its@8010,00020000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x8010 0x20000 0x0 0x200000>; + }; }; uaa0: serial@87e0,24000000 { diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f38c94f1d898..4e17e7ede33d 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -83,6 +83,7 @@ CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_CEVA=y CONFIG_AHCI_XGENE=y CONFIG_PATA_PLATFORM=y CONFIG_PATA_OF_PLATFORM=y diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 39248d3adf5d..406485ed110a 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -19,6 +19,14 @@ #include <asm/psci.h> #include <asm/smp_plat.h> +/* Macros for consistency checks of the GICC subtable of MADT */ +#define ACPI_MADT_GICC_LENGTH \ + (acpi_gbl_FADT.header.revision < 6 ? 
76 : 80) + +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ + (entry)->header.length != ACPI_MADT_GICC_LENGTH) + /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI /* ACPI table mapping after acpi_gbl_permanent_mmap is set */ diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 7ce589ca54a4..9047cab68fd3 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -15,7 +15,7 @@ #include <asm/cpuidle.h> #include <asm/cpu_ops.h> -int arm_cpuidle_init(unsigned int cpu) +int __init arm_cpuidle_init(unsigned int cpu) { int ret = -EOPNOTSUPP; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a7691a378668..f860bfda454a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -352,8 +352,8 @@ el1_inv: // TODO: add support for undefined instructions in kernel mode enable_dbg mov x0, sp + mov x2, x1 mov x1, #BAD_SYNC - mrs x2, esr_el1 b bad_mode ENDPROC(el1_sync) @@ -553,7 +553,7 @@ el0_inv: ct_user_exit mov x0, sp mov x1, #BAD_SYNC - mrs x2, esr_el1 + mov x2, x25 bl bad_mode b ret_to_user ENDPROC(el0_sync) diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S index bd9bfaa9269b..f332d5d1f6b4 100644 --- a/arch/arm64/kernel/entry32.S +++ b/arch/arm64/kernel/entry32.S @@ -32,13 +32,11 @@ ENTRY(compat_sys_sigreturn_wrapper) mov x0, sp - mov x27, #0 // prevent syscall restart handling (why) b compat_sys_sigreturn ENDPROC(compat_sys_sigreturn_wrapper) ENTRY(compat_sys_rt_sigreturn_wrapper) mov x0, sp - mov x27, #0 // prevent syscall restart handling (why) b compat_sys_rt_sigreturn ENDPROC(compat_sys_rt_sigreturn_wrapper) diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index e7d934d3afe0..7a1a5da6c8c1 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -31,7 +31,6 @@ #include <asm/current.h> #include <asm/debug-monitors.h> #include <asm/hw_breakpoint.h> -#include <asm/kdebug.h> #include <asm/traps.h> #include <asm/cputype.h> #include <asm/system_misc.h> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 702591f6180a..b31e9a4b6275 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev) /* Don't bother with PPIs; they're already affine */ irq = platform_get_irq(pdev, 0); if (irq >= 0 && irq_is_percpu(irq)) - return 0; + goto out; irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); if (!irqs) @@ -1340,12 +1340,13 @@ static int armpmu_device_probe(struct platform_device *pdev) if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) break; - of_node_put(dn); if (cpu >= nr_cpu_ids) { pr_warn("Failed to find logical CPU for %s\n", dn->name); + of_node_put(dn); break; } + of_node_put(dn); irqs[i] = cpu; } @@ -1355,6 +1356,7 @@ static int armpmu_device_probe(struct platform_device *pdev) else kfree(irqs); +out: cpu_pmu->plat_device = pdev; return 0; } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 4b2121bd7f9c..50fb4696654e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -396,13 +396,13 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) { u64 hwid = processor->arm_mpidr; - if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) { - pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); + if (!(processor->flags & ACPI_MADT_ENABLED)) { + pr_debug("skipping 
disabled CPU entry with 0x%llx MPIDR\n", hwid); return; } - if (!(processor->flags & ACPI_MADT_ENABLED)) { - pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); + if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) { + pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); return; } @@ -438,7 +438,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, struct acpi_madt_generic_interrupt *processor; processor = (struct acpi_madt_generic_interrupt *)header; - if (BAD_MADT_ENTRY(processor, end)) + if (BAD_MADT_GICC_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); @@ -693,7 +693,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) struct pt_regs *old_regs = set_irq_regs(regs); if ((unsigned)ipinr < NR_IPI) { - trace_ipi_entry(ipi_types[ipinr]); + trace_ipi_entry_rcuidle(ipi_types[ipinr]); __inc_irq_stat(cpu, ipi_irqs[ipinr]); } @@ -736,7 +736,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) } if ((unsigned)ipinr < NR_IPI) - trace_ipi_exit(ipi_types[ipinr]); + trace_ipi_exit_rcuidle(ipi_types[ipinr]); set_irq_regs(old_regs); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index a12251c074a8..566bc4c35040 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -335,7 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) if (call_undef_hook(regs) == 0) return; - if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) { + if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) { pr_info("%s[%d]: undefined instruction: pc=%p\n", current->comm, task_pid_nr(current), pc); dump_instr(KERN_INFO, regs); diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 9d84feb41a16..773d37a14039 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -4,5 +4,3 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ context.o proc.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_ARM64_PTDUMP) += dump.o - -CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/ diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b1fc69cd1499..94d98cd1aad8 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -115,7 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, { struct siginfo si; - if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) { + if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) { pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, addr, esr); diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index cccc4af87a03..831ec534d449 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -33,13 +33,13 @@ int pmd_huge(pmd_t pmd) { - return !(pmd_val(pmd) & PMD_TABLE_BIT); + return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); } int pud_huge(pud_t pud) { #ifndef __PAGETABLE_PMD_FOLDED - return !(pud_val(pud) & PUD_TABLE_BIT); + return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT); #else return 0; #endif diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 82d3435bf14f..a4ede4e2ddd1 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -117,7 +117,7 @@ void split_pud(pud_t *old_pud, pmd_t *pmd) int i = 0; do { - set_pmd(pmd, __pmd(addr | prot)); + set_pmd(pmd, __pmd(addr | pgprot_val(prot))); addr += PMD_SIZE; } while (pmd++, i++, i < PTRS_PER_PMD); } diff --git a/arch/arm64/net/bpf_jit.h 
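
(The arch/arm64/mm/hugetlbpage.c hunk above adds a non-zero check to pmd_huge()/pud_huge(): a cleared entry also has the TABLE bit clear, so the old test reported pmd_none()/pud_none() entries as huge. A standalone model of the predicate, using made-up bit values rather than the real arm64 descriptor layout:)

#include <assert.h>
#include <stdint.h>

#define TABLE_BIT	(1u << 1)	/* illustrative: set for table entries, clear for block (huge) entries */

/* Old predicate: misclassifies an empty (zero) entry as a huge mapping. */
static int huge_old(uint64_t pmd) { return !(pmd & TABLE_BIT); }

/* Fixed predicate: the entry must be non-zero before the block test applies. */
static int huge_new(uint64_t pmd) { return pmd && !(pmd & TABLE_BIT); }

int main(void)
{
	uint64_t none  = 0;				/* pmd_none() entry */
	uint64_t block = 0x200000 | 1;			/* made-up valid block entry */
	uint64_t table = 0x1000 | TABLE_BIT | 1;	/* made-up table entry */

	assert(huge_old(none));		/* the bug: an empty entry looked huge */
	assert(!huge_new(none));
	assert(huge_new(block));
	assert(!huge_new(table));
	return 0;
}
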
b/arch/arm64/net/bpf_jit.h index de0a81a539a0..98a26ce82d26 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -110,6 +110,10 @@ /* Rd = Rn >> shift; signed */ #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) +/* Zero extend */ +#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15) +#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31) + /* Move wide (immediate) */ #define A64_MOVEW(sf, Rd, imm16, shift, type) \ aarch64_insn_gen_movewide(Rd, imm16, shift, \ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index dc6a4842683a..c047598b09e0 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg, static inline int bpf2a64_offset(int bpf_to, int bpf_from, const struct jit_ctx *ctx) { - int to = ctx->offset[bpf_to + 1]; + int to = ctx->offset[bpf_to]; /* -1 to account for the Branch instruction */ - int from = ctx->offset[bpf_from + 1] - 1; + int from = ctx->offset[bpf_from] - 1; return to - from; } @@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU | BPF_END | BPF_FROM_BE: #ifdef CONFIG_CPU_BIG_ENDIAN if (BPF_SRC(code) == BPF_FROM_BE) - break; + goto emit_bswap_uxt; #else /* !CONFIG_CPU_BIG_ENDIAN */ if (BPF_SRC(code) == BPF_FROM_LE) - break; + goto emit_bswap_uxt; #endif switch (imm) { case 16: emit(A64_REV16(is64, dst, dst), ctx); + /* zero-extend 16 bits into 64 bits */ + emit(A64_UXTH(is64, dst, dst), ctx); break; case 32: emit(A64_REV32(is64, dst, dst), ctx); + /* upper 32 bits already cleared */ break; case 64: emit(A64_REV64(dst, dst), ctx); break; } break; +emit_bswap_uxt: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ + emit(A64_UXTH(is64, dst, dst), ctx); + break; + case 32: + /* zero-extend 32 bits into 64 bits */ + emit(A64_UXTW(is64, dst, dst), ctx); + break; + case 64: + /* nop */ + break; + } + break; /* dst = imm */ case BPF_ALU | BPF_MOV | BPF_K: case BPF_ALU64 | BPF_MOV | BPF_K: @@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx) const struct bpf_insn *insn = &prog->insnsi[i]; int ret; + ret = build_insn(insn, ctx); + if (ctx->image == NULL) ctx->offset[i] = ctx->idx; - ret = build_insn(insn, ctx); if (ret > 0) { i++; continue; diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index c36efa0c7163..719dd796c12c 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c @@ -136,7 +136,7 @@ void decode_address(char *buf, unsigned long address) struct file *file = vma->vm_file; if (file) { - char *d_name = d_path(&file->f_path, _tmpbuf, + char *d_name = file_path(file, _tmpbuf, sizeof(_tmpbuf)); if (!IS_ERR(d_name)) name = d_name; diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c index 4dda9bd6b8fb..e989cee77414 100644 --- a/arch/cris/arch-v32/drivers/sync_serial.c +++ b/arch/cris/arch-v32/drivers/sync_serial.c @@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port) if (port->write_ts_idx == NBR_IN_DESCR) port->write_ts_idx = 0; idx = port->write_ts_idx++; - do_posix_clock_monotonic_gettime(&port->timestamp[idx]); + ktime_get_ts(&port->timestamp[idx]); port->in_buffer_len += port->inbufchunk; } spin_unlock_irqrestore(&port->lock, flags); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 7f3028965064..97e48b0eefc7 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -215,10 
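
(The arch/arm64/net/bpf_jit_comp.c hunk above makes the BPF_ALU | BPF_END endian conversions zero-extend 16- and 32-bit results into the full 64-bit register via the new A64_UXTH/A64_UXTW encodings: even when the byte swap itself is a no-op on a matching-endian host, the upper bits must still be cleared. A userspace sketch of the required semantics for the 16-bit case, with an invented input value:)

#include <assert.h>
#include <stdint.h>

/* Model of BPF "convert to 16-bit endianness" on a 64-bit register:
 * the result is the (possibly swapped) low 16 bits, zero-extended. */
static uint64_t bpf_end16(uint64_t reg, int swap_needed)
{
	uint16_t lo = (uint16_t)reg;

	if (swap_needed)
		lo = (uint16_t)__builtin_bswap16(lo);	/* REV16 in the JIT */
	return (uint64_t)lo;				/* UXTH: upper 48 bits cleared */
}

int main(void)
{
	uint64_t reg = 0xdeadbeefcafe1234ULL;

	assert(bpf_end16(reg, 1) == 0x3412);	/* swap + zero-extend */
	assert(bpf_end16(reg, 0) == 0x1234);	/* no swap, but still zero-extend */
	return 0;
}
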
+215,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) pmd_t *pmd; pte_t *pte; - if (!PageReserved(page)) - printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n", - page_address(page)); - pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */ { diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 2a14585c90d2..aab7e46cadd5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2231,7 +2231,7 @@ config MIPS_CMP config MIPS_CPS bool "MIPS Coherent Processing System support" - depends on SYS_SUPPORTS_MIPS_CPS && !64BIT + depends on SYS_SUPPORTS_MIPS_CPS select MIPS_CM select MIPS_CPC select MIPS_CPS_PM if HOTPLUG_CPU diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index 37c08a27b4f0..c9f7e231e66b 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & - * Insititute of Computing Technology + * Institute of Computing Technology * Author: Xiang Gao, gaoxiang@ict.ac.cn * Huacai Chen, chenhc@lemote.com * Xiaofu Meng, Shuangshuang Zhang diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 2b25d1ba1ea0..16f1ea9ab191 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -23,6 +23,7 @@ extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; +extern cpumask_t cpu_foreign_map; #define raw_smp_processor_id() (current_thread_info()->cpu) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index c0c5e5972256..d8f9b357b222 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; case blezl_op: /* not really i_format */ - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) goto sigill_r6; case blez_op: /* @@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; case bgtzl_op: - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) goto sigill_r6; case bgtz_op: /* diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 55b759a0019e..1b6ca634e646 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry) nop /* This is an NMI */ - la k0, nmi_handler + PTR_LA k0, nmi_handler jr k0 nop @@ -107,10 +107,10 @@ not_nmi: mul t1, t1, t0 mul t1, t1, t2 - li a0, KSEG0 - add a1, a0, t1 + li a0, CKSEG0 + PTR_ADD a1, a0, t1 1: cache Index_Store_Tag_I, 0(a0) - add a0, a0, t0 + PTR_ADD a0, a0, t0 bne a0, a1, 1b nop icache_done: @@ -134,12 +134,12 @@ icache_done: mul t1, t1, t0 mul t1, t1, t2 - li a0, KSEG0 - addu a1, a0, t1 - subu a1, a1, t0 + li a0, CKSEG0 + PTR_ADDU a1, a0, t1 + PTR_SUBU a1, a1, t0 1: cache Index_Store_Tag_D, 0(a0) bne a0, a1, 1b - add a0, a0, t0 + PTR_ADD a0, a0, t0 dcache_done: /* Set Kseg0 CCA to that in s0 */ @@ -152,11 +152,11 @@ dcache_done: /* Enter the coherent domain */ li t0, 0xff - sw t0, GCR_CL_COHERENCE_OFS(v1) + PTR_S t0, GCR_CL_COHERENCE_OFS(v1) ehb /* Jump to kseg0 */ - la t0, 1f + PTR_LA t0, 1f jr t0 nop @@ -178,9 +178,9 @@ dcache_done: nop /* Off we go! 
*/ - lw t1, VPEBOOTCFG_PC(v0) - lw gp, VPEBOOTCFG_GP(v0) - lw sp, VPEBOOTCFG_SP(v0) + PTR_L t1, VPEBOOTCFG_PC(v0) + PTR_L gp, VPEBOOTCFG_GP(v0) + PTR_L sp, VPEBOOTCFG_SP(v0) jr t1 nop END(mips_cps_core_entry) @@ -217,7 +217,7 @@ LEAF(excep_intex) .org 0x480 LEAF(excep_ejtag) - la k0, ejtag_debug_handler + PTR_LA k0, ejtag_debug_handler jr k0 nop END(excep_ejtag) @@ -229,7 +229,7 @@ LEAF(mips_cps_core_init) nop .set push - .set mips32r2 + .set mips64r2 .set mt /* Only allow 1 TC per VPE to execute... */ @@ -237,7 +237,7 @@ LEAF(mips_cps_core_init) /* ...and for the moment only 1 VPE */ dvpe - la t1, 1f + PTR_LA t1, 1f jr.hb t1 nop @@ -250,25 +250,25 @@ LEAF(mips_cps_core_init) mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) - addiu t7, t0, 1 + addiu ta3, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f nop /* Loop through each VPE within this core */ - li t5, 1 + li ta1, 1 1: /* Operate on the appropriate TC */ - mtc0 t5, CP0_VPECONTROL + mtc0 ta1, CP0_VPECONTROL ehb /* Bind TC to VPE (1:1 TC:VPE mapping) */ - mttc0 t5, CP0_TCBIND + mttc0 ta1, CP0_TCBIND /* Set exclusive TC, non-active, master */ li t0, VPECONF0_MVP - sll t1, t5, VPECONF0_XTC_SHIFT + sll t1, ta1, VPECONF0_XTC_SHIFT or t0, t0, t1 mttc0 t0, CP0_VPECONF0 @@ -280,8 +280,8 @@ LEAF(mips_cps_core_init) mttc0 t0, CP0_TCHALT /* Next VPE */ - addiu t5, t5, 1 - slt t0, t5, t7 + addiu ta1, ta1, 1 + slt t0, ta1, ta3 bnez t0, 1b nop @@ -298,19 +298,19 @@ LEAF(mips_cps_core_init) LEAF(mips_cps_boot_vpes) /* Retrieve CM base address */ - la t0, mips_cm_base - lw t0, 0(t0) + PTR_LA t0, mips_cm_base + PTR_L t0, 0(t0) /* Calculate a pointer to this cores struct core_boot_config */ - lw t0, GCR_CL_ID_OFS(t0) + PTR_L t0, GCR_CL_ID_OFS(t0) li t1, COREBOOTCFG_SIZE mul t0, t0, t1 - la t1, mips_cps_core_bootcfg - lw t1, 0(t1) - addu t0, t0, t1 + PTR_LA t1, mips_cps_core_bootcfg + PTR_L t1, 0(t1) + PTR_ADDU t0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ - has_mt t6, 1f + has_mt ta2, 1f li t9, 0 /* Find the number of VPEs present in the core */ @@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes) 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE mul v0, t9, t1 - lw t7, COREBOOTCFG_VPECONFIG(t0) - addu v0, v0, t7 + PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) + PTR_ADDU v0, v0, ta3 #ifdef CONFIG_MIPS_MT /* If the core doesn't support MT then return */ - bnez t6, 1f + bnez ta2, 1f nop jr ra nop .set push - .set mips32r2 + .set mips64r2 .set mt 1: /* Enter VPE configuration state */ dvpe - la t1, 1f + PTR_LA t1, 1f jr.hb t1 nop 1: mfc0 t1, CP0_MVPCONTROL @@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes) ehb /* Loop through each VPE */ - lw t6, COREBOOTCFG_VPEMASK(t0) - move t8, t6 - li t5, 0 + PTR_L ta2, COREBOOTCFG_VPEMASK(t0) + move t8, ta2 + li ta1, 0 /* Check whether the VPE should be running. 
If not, skip it */ -1: andi t0, t6, 1 +1: andi t0, ta2, 1 beqz t0, 2f nop @@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes) mfc0 t0, CP0_VPECONTROL ori t0, t0, VPECONTROL_TARGTC xori t0, t0, VPECONTROL_TARGTC - or t0, t0, t5 + or t0, t0, ta1 mtc0 t0, CP0_VPECONTROL ehb @@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes) /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE - mul t0, t0, t5 - addu t0, t0, t7 + mul t0, t0, ta1 + addu t0, t0, ta3 /* Set the TC restart PC */ lw t1, VPEBOOTCFG_PC(t0) @@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes) mttc0 t0, CP0_VPECONF0 /* Next VPE */ -2: srl t6, t6, 1 - addiu t5, t5, 1 - bnez t6, 1b +2: srl ta2, ta2, 1 + addiu ta1, ta1, 1 + bnez ta2, 1b nop /* Leave VPE configuration state */ @@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes) /* This VPE should be offline, halt the TC */ li t0, TCHALT_H mtc0 t0, CP0_TCHALT - la t0, 1f + PTR_LA t0, 1f 1: jr.hb t0 nop @@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes) .set noat lw $1, TI_CPU(gp) sll $1, $1, LONGLOG - la \dest, __per_cpu_offset + PTR_LA \dest, __per_cpu_offset addu $1, $1, \dest lw $1, 0($1) - la \dest, cps_cpu_state + PTR_LA \dest, cps_cpu_state addu \dest, \dest, $1 .set pop .endm diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 6e8de80bb446..4cc13508d967 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp) .set noreorder .set nomacro -1: user_lw(t5, 16(t0)) # argument #5 from usp -4: user_lw(t6, 20(t0)) # argument #6 from usp -3: user_lw(t7, 24(t0)) # argument #7 from usp -2: user_lw(t8, 28(t0)) # argument #8 from usp +load_a4: user_lw(t5, 16(t0)) # argument #5 from usp +load_a5: user_lw(t6, 20(t0)) # argument #6 from usp +load_a6: user_lw(t7, 24(t0)) # argument #7 from usp +load_a7: user_lw(t8, 28(t0)) # argument #8 from usp +loads_done: sw t5, 16(sp) # argument #5 to ksp sw t6, 20(sp) # argument #6 to ksp @@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp) .set pop .section __ex_table,"a" - PTR 1b,bad_stack - PTR 2b,bad_stack - PTR 3b,bad_stack - PTR 4b,bad_stack + PTR load_a4, bad_stack_a4 + PTR load_a5, bad_stack_a5 + PTR load_a6, bad_stack_a6 + PTR load_a7, bad_stack_a7 .previous lw t0, TI_FLAGS($28) # syscall tracing enabled? @@ -153,8 +154,8 @@ syscall_trace_entry: /* ------------------------------------------------------------------------ */ /* - * The stackpointer for a call with more than 4 arguments is bad. - * We probably should handle this case a bit more drastic. + * Our open-coded access area sanity test for the stack pointer + * failed. We probably should handle this case a bit more drastic. 
*/ bad_stack: li v0, EFAULT @@ -163,6 +164,22 @@ bad_stack: sw t0, PT_R7(sp) j o32_syscall_exit +bad_stack_a4: + li t5, 0 + b load_a5 + +bad_stack_a5: + li t6, 0 + b load_a6 + +bad_stack_a6: + li t7, 0 + b load_a7 + +bad_stack_a7: + li t8, 0 + b loads_done + /* * The system call does not exist in this kernel */ diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d07b210fbeff..f543ff4feef9 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp) daddu t1, t0, 32 bltz t1, bad_stack -1: lw a4, 16(t0) # argument #5 from usp -2: lw a5, 20(t0) # argument #6 from usp -3: lw a6, 24(t0) # argument #7 from usp -4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls) +load_a4: lw a4, 16(t0) # argument #5 from usp +load_a5: lw a5, 20(t0) # argument #6 from usp +load_a6: lw a6, 24(t0) # argument #7 from usp +load_a7: lw a7, 28(t0) # argument #8 from usp +loads_done: .section __ex_table,"a" - PTR 1b, bad_stack - PTR 2b, bad_stack - PTR 3b, bad_stack - PTR 4b, bad_stack + PTR load_a4, bad_stack_a4 + PTR load_a5, bad_stack_a5 + PTR load_a6, bad_stack_a6 + PTR load_a7, bad_stack_a7 .previous li t1, _TIF_WORK_SYSCALL_ENTRY @@ -167,6 +168,22 @@ bad_stack: sd t0, PT_R7(sp) j o32_syscall_exit +bad_stack_a4: + li a4, 0 + b load_a5 + +bad_stack_a5: + li a5, 0 + b load_a6 + +bad_stack_a6: + li a6, 0 + b load_a7 + +bad_stack_a7: + li a7, 0 + b loads_done + not_o32_scall: /* * This is not an o32 compatibility syscall, pass it on @@ -383,7 +400,7 @@ EXPORT(sys32_call_table) PTR sys_connect /* 4170 */ PTR sys_getpeername PTR sys_getsockname - PTR sys_getsockopt + PTR compat_sys_getsockopt PTR sys_listen PTR compat_sys_recv /* 4175 */ PTR compat_sys_recvfrom diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index be73c491182b..008b3378653a 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -337,6 +337,11 @@ static void __init bootmem_init(void) min_low_pfn = start; if (end <= reserved_end) continue; +#ifdef CONFIG_BLK_DEV_INITRD + /* mapstart should be after initrd_end */ + if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) + continue; +#endif if (start >= mapstart) continue; mapstart = max(reserved_end, start); @@ -366,14 +371,6 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } -#ifdef CONFIG_BLK_DEV_INITRD - /* - * mapstart should be after initrd_end - */ - if (initrd_end) - mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); -#endif - /* * Initialize the boot-time allocator with low memory only. 
*/ diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 4251d390b5b6..c88937745b4e 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) /* * Patch the start of mips_cps_core_entry to provide: * - * v0 = CM base address + * v1 = CM base address * s0 = kseg0 CCA */ entry_code = (u32 *)&mips_cps_core_entry; @@ -369,7 +369,7 @@ void play_dead(void) static void wait_for_sibling_halt(void *ptr_cpu) { - unsigned cpu = (unsigned)ptr_cpu; + unsigned cpu = (unsigned long)ptr_cpu; unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); unsigned halted; unsigned long flags; @@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu) */ err = smp_call_function_single(cpu_death_sibling, wait_for_sibling_halt, - (void *)cpu, 1); + (void *)(unsigned long)cpu, 1); if (err) panic("Failed to call remote sibling CPU\n"); } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index faa46ebd9dda..d0744cc77ea7 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +/* + * A logcal cpu mask containing only one VPE per core to + * reduce the number of IPIs on large MT systems. + */ +cpumask_t cpu_foreign_map __read_mostly; +EXPORT_SYMBOL(cpu_foreign_map); + /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; @@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu) } } +/* + * Calculate a new cpu_foreign_map mask whenever a + * new cpu appears or disappears. + */ +static inline void calculate_cpu_foreign_map(void) +{ + int i, k, core_present; + cpumask_t temp_foreign_map; + + /* Re-calculate the mask */ + for_each_online_cpu(i) { + core_present = 0; + for_each_cpu(k, &temp_foreign_map) + if (cpu_data[i].package == cpu_data[k].package && + cpu_data[i].core == cpu_data[k].core) + core_present = 1; + if (!core_present) + cpumask_set_cpu(i, &temp_foreign_map); + } + + cpumask_copy(&cpu_foreign_map, &temp_foreign_map); +} + struct plat_smp_ops *mp_ops; EXPORT_SYMBOL(mp_ops); @@ -146,6 +176,8 @@ asmlinkage void start_secondary(void) set_cpu_sibling_map(cpu); set_cpu_core_map(cpu); + calculate_cpu_foreign_map(); + cpumask_set_cpu(cpu, &cpu_callin_map); synchronise_count_slave(cpu); @@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void) static void stop_this_cpu(void *dummy) { /* - * Remove this CPU: + * Remove this CPU. Be a bit slow here and + * set the bits for every online CPU so we don't miss + * any IPI whilst taking this VPE down. */ + + cpumask_copy(&cpu_foreign_map, cpu_online_mask); + + /* Make it visible to every other CPU */ + smp_mb(); + set_cpu_online(smp_processor_id(), false); + calculate_cpu_foreign_map(); local_irq_disable(); while (1); } @@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); set_cpu_core_map(0); + calculate_cpu_foreign_map(); #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 2a7b38ed23f0..e207a43b5f8f 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2130,10 +2130,10 @@ void per_cpu_trap_init(bool is_boot_cpu) BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); - /* Boot CPU's cache setup in setup_arch(). 
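
(calculate_cpu_foreign_map() in the arch/mips/kernel/smp.c hunk above builds a mask with one VPE per physical core, since per-core resources such as caches only need to be reached once per core when broadcasting cache operations. A small userspace model of the selection loop; the CPU topology is invented for the example:)

#include <stdio.h>

struct cpu_info { int package; int core; };

int main(void)
{
	/* Four online CPUs: one package, two cores, two VPEs per core. */
	struct cpu_info cpu[] = {
		{ .package = 0, .core = 0 }, { .package = 0, .core = 0 },
		{ .package = 0, .core = 1 }, { .package = 0, .core = 1 },
	};
	int n = sizeof(cpu) / sizeof(cpu[0]);
	unsigned long foreign_map = 0;

	for (int i = 0; i < n; i++) {
		int core_present = 0;

		for (int k = 0; k < n; k++) {
			if (!(foreign_map & (1UL << k)))
				continue;	/* only compare against CPUs already picked */
			if (cpu[i].package == cpu[k].package &&
			    cpu[i].core == cpu[k].core)
				core_present = 1;
		}
		if (!core_present)
			foreign_map |= 1UL << i;
	}

	printf("cpu_foreign_map = 0x%lx\n", foreign_map);	/* expect 0x5: CPUs 0 and 2 */
	return 0;
}
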
*/ - if (!is_boot_cpu) - cpu_cache_init(); - tlb_init(); + /* Boot CPU's cache setup in setup_arch(). */ + if (!is_boot_cpu) + cpu_cache_init(); + tlb_init(); TLBMISS_HANDLER_SETUP(); } diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson64/common/bonito-irq.c index cc0e4fd548e6..4e116d23bab3 100644 --- a/arch/mips/loongson64/common/bonito-irq.c +++ b/arch/mips/loongson64/common/bonito-irq.c @@ -3,7 +3,7 @@ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org) * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c index 72fed003a536..01fbed137028 100644 --- a/arch/mips/loongson64/common/cmdline.c +++ b/arch/mips/loongson64/common/cmdline.c @@ -6,7 +6,7 @@ * Copyright 2003 ICT CAS * Author: Michael Guo <guoyi@ict.ac.cn> * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * Copyright (C) 2009 Lemote Inc. diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c index 12c75db23420..875037063a80 100644 --- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c +++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c @@ -1,7 +1,7 @@ /* * CS5536 General timer functions * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Yanhua, yanh@lemote.com * * Copyright (C) 2009 Lemote Inc. diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index 22f04ca2ff3e..f6c44dd332e2 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -6,7 +6,7 @@ * Copyright 2003 ICT CAS * Author: Michael Guo <guoyi@ict.ac.cn> * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * Copyright (C) 2009 Lemote Inc. diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson64/common/irq.c index 687003b19b45..d36d969a4a87 100644 --- a/arch/mips/loongson64/common/irq.c +++ b/arch/mips/loongson64/common/irq.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson64/common/setup.c index d477dd6bb326..2dc5122f0e09 100644 --- a/arch/mips/loongson64/common/setup.c +++ b/arch/mips/loongson64/common/setup.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. 
& Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson64/fuloong-2e/irq.c index ef5ec8f3de5f..892963f860b7 100644 --- a/arch/mips/loongson64/fuloong-2e/irq.c +++ b/arch/mips/loongson64/fuloong-2e/irq.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c index 462e34d46b4a..a78fb657068c 100644 --- a/arch/mips/loongson64/lemote-2f/clock.c +++ b/arch/mips/loongson64/lemote-2f/clock.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology * Author: Yanhua, yanh@lemote.com * * This file is subject to the terms and conditions of the GNU General Public @@ -15,7 +15,7 @@ #include <linux/spinlock.h> #include <asm/clock.h> -#include <asm/mach-loongson/loongson.h> +#include <asm/mach-loongson64/loongson.h> static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 12d14ed48778..6f9e010cec4d 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & - * Insititute of Computing Technology + * Institute of Computing Technology * Author: Xiang Gao, gaoxiang@ict.ac.cn * Huacai Chen, chenhc@lemote.com * Xiaofu Meng, Shuangshuang Zhang diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 22b9b2cb9219..712f17a2ecf2 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, /* Fall through */ case jr_op: /* For R6, JR already emulated in jalr_op */ - if (NO_R6EMU && insn.r_format.opcode == jr_op) + if (NO_R6EMU && insn.r_format.func == jr_op) break; *contpc = regs->regs[insn.r_format.rs]; return 1; @@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; case blezl_op: - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) break; case blez_op: @@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; case bgtzl_op: - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) break; case bgtz_op: /* diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7f660dc67596..fbea4432f3f2 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -37,6 +37,7 @@ #include <asm/cacheflush.h> /* for run_uncached() */ #include <asm/traps.h> #include <asm/dma-coherence.h> +#include <asm/mips-cm.h> /* * Special Variant of smp_call_function for use by cache functions: @@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) { preempt_disable(); -#ifndef CONFIG_MIPS_MT_SMP - smp_call_function(func, info, 1); -#endif + /* + * The Coherent Manager propagates address-based cache ops to other + * cores but not index-based ops. 
However, r4k_on_each_cpu is used + * in both cases so there is no easy way to tell what kind of op is + * executed to the other cores. The best we can probably do is + * to restrict that call when a CM is not present because both + * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops. + */ + if (!mips_cm_present()) + smp_call_function_many(&cpu_foreign_map, func, info, 1); func(info); preempt_enable(); } @@ -937,7 +945,9 @@ static void b5k_instruction_hazard(void) } static char *way_string[] = { NULL, "direct mapped", "2-way", - "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" + "3-way", "4-way", "5-way", "6-way", "7-way", "8-way", + "9-way", "10-way", "11-way", "12-way", + "13-way", "14-way", "15-way", "16-way", }; static void probe_pcache(void) diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 185e68261f45..5625b190edc0 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts) int get_c0_fdc_int(void) { - int mips_cpu_fdc_irq; + /* + * Some cores claim the FDC is routable through the GIC, but it doesn't + * actually seem to be connected for those Malta bitstreams. + */ + switch (current_cpu_type()) { + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + return -1; + }; if (cpu_has_veic) - mips_cpu_fdc_irq = -1; + return -1; else if (gic_present) - mips_cpu_fdc_irq = gic_get_c0_fdc_int(); + return gic_get_c0_fdc_int(); else if (cp0_fdc_irq >= 0) - mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; + return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; else - mips_cpu_fdc_irq = -1; - - return mips_cpu_fdc_irq; + return -1; } int get_c0_perfcount_int(void) diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c index d2dc836523a3..8bd8ebb20a72 100644 --- a/arch/mips/pistachio/init.c +++ b/arch/mips/pistachio/init.c @@ -63,13 +63,19 @@ void __init plat_mem_setup(void) plat_setup_iocoherency(); } -#define DEFAULT_CPC_BASE_ADDR 0x1bde0000 +#define DEFAULT_CPC_BASE_ADDR 0x1bde0000 +#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000 phys_addr_t mips_cpc_default_phys_base(void) { return DEFAULT_CPC_BASE_ADDR; } +phys_addr_t mips_cdmm_phys_base(void) +{ + return DEFAULT_CDMM_BASE_ADDR; +} + static void __init mips_nmi_setup(void) { void *base; diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 67889fcea8aa..7c73fcb92a10 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -27,6 +27,11 @@ int get_c0_perfcount_int(void) return gic_get_c0_perfcount_int(); } +int get_c0_fdc_int(void) +{ + return gic_get_c0_fdc_int(); +} + void __init plat_time_init(void) { struct device_node *np; diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c index be186a75f622..9e3cc8a40ee9 100644 --- a/arch/nios2/kernel/time.c +++ b/arch/nios2/kernel/time.c @@ -19,7 +19,9 @@ #include <linux/io.h> #include <linux/slab.h> -#define ALTERA_TIMER_STATUS_REG 0 +#define ALTR_TIMER_COMPATIBLE "altr,timer-1.0" + +#define ALTERA_TIMER_STATUS_REG 0 #define ALTERA_TIMER_CONTROL_REG 4 #define ALTERA_TIMER_PERIODL_REG 8 #define ALTERA_TIMER_PERIODH_REG 12 @@ -304,7 +306,16 @@ void read_persistent_clock(struct timespec *ts) void __init time_init(void) { + struct device_node *np; + int count = 0; + + for_each_compatible_node(np, NULL, ALTR_TIMER_COMPATIBLE) + count++; + + if (count < 2) + panic("%d timer is found, it needs 2 timers in system\n", count); + clocksource_of_init(); } -CLOCKSOURCE_OF_DECLARE(nios2_timer, "altr,timer-1.0", 
nios2_time_init); +CLOCKSOURCE_OF_DECLARE(nios2_timer, ALTR_TIMER_COMPATIBLE, nios2_time_init); diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 0a183756d6ec..f93c4a4e6580 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -16,7 +16,7 @@ #include <asm/processor.h> #include <asm/cache.h> -extern spinlock_t pa_dbit_lock; +extern spinlock_t pa_tlb_lock; /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel @@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock; */ #define kern_addr_valid(addr) (1) +/* Purge data and instruction TLB entries. Must be called holding + * the pa_tlb_lock. The TLB purge instructions are slow on SMP + * machines since the purge must be broadcast to all CPUs. + */ + +static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) +{ + mtsp(mm->context, 1); + pdtlb(addr); + if (unlikely(split_tlb)) + pitlb(addr); +} + /* Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. @@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock; *(pteptr) = (pteval); \ } while(0) -extern void purge_tlb_entries(struct mm_struct *, unsigned long); +#define pte_inserted(x) \ + ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \ + == (_PAGE_PRESENT|_PAGE_ACCESSED)) -#define set_pte_at(mm, addr, ptep, pteval) \ - do { \ +#define set_pte_at(mm, addr, ptep, pteval) \ + do { \ + pte_t old_pte; \ unsigned long flags; \ - spin_lock_irqsave(&pa_dbit_lock, flags); \ - set_pte(ptep, pteval); \ - purge_tlb_entries(mm, addr); \ - spin_unlock_irqrestore(&pa_dbit_lock, flags); \ + spin_lock_irqsave(&pa_tlb_lock, flags); \ + old_pte = *ptep; \ + set_pte(ptep, pteval); \ + if (pte_inserted(old_pte)) \ + purge_tlb_entries(mm, addr); \ + spin_unlock_irqrestore(&pa_tlb_lock, flags); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) (pte_val(x) == 0) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) +#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0)) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) @@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned if (!pte_young(*ptep)) return 0; - spin_lock_irqsave(&pa_dbit_lock, flags); + spin_lock_irqsave(&pa_tlb_lock, flags); pte = *ptep; if (!pte_young(pte)) { - spin_unlock_irqrestore(&pa_dbit_lock, flags); + spin_unlock_irqrestore(&pa_tlb_lock, flags); return 0; } set_pte(ptep, pte_mkold(pte)); purge_tlb_entries(vma->vm_mm, addr); - spin_unlock_irqrestore(&pa_dbit_lock, flags); + spin_unlock_irqrestore(&pa_tlb_lock, flags); return 1; } @@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t old_pte; unsigned long flags; - spin_lock_irqsave(&pa_dbit_lock, flags); + spin_lock_irqsave(&pa_tlb_lock, flags); old_pte = *ptep; - pte_clear(mm,addr,ptep); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(&pa_dbit_lock, flags); + set_pte(ptep, __pte(0)); + if (pte_inserted(old_pte)) + purge_tlb_entries(mm, addr); + spin_unlock_irqrestore(&pa_tlb_lock, flags); return old_pte; } @@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, static inline void ptep_set_wrprotect(struct mm_struct *mm, 
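
(The arch/parisc/include/asm/pgtable.h hunk below only broadcasts a TLB purge from set_pte_at() and ptep_get_and_clear() when the old PTE could actually have been loaded into the TLB, which the new pte_inserted() test takes to mean both _PAGE_PRESENT and _PAGE_ACCESSED are set, since the software TLB handler sets ACCESSED when it inserts an entry. A compact model of that predicate; the bit positions are placeholders, not the real parisc layout:)

#include <assert.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1u << 0)	/* placeholder bit positions */
#define _PAGE_ACCESSED	(1u << 1)

static int pte_inserted(uint32_t pte)
{
	return (pte & (_PAGE_PRESENT | _PAGE_ACCESSED)) ==
	       (_PAGE_PRESENT | _PAGE_ACCESSED);
}

int main(void)
{
	assert(!pte_inserted(0));				/* never mapped: no purge needed */
	assert(!pte_inserted(_PAGE_PRESENT));			/* present but never referenced */
	assert(pte_inserted(_PAGE_PRESENT | _PAGE_ACCESSED));	/* may be cached in the TLB */
	return 0;
}
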
unsigned long addr, pte_t *ptep) { unsigned long flags; - spin_lock_irqsave(&pa_dbit_lock, flags); + spin_lock_irqsave(&pa_tlb_lock, flags); set_pte(ptep, pte_wrprotect(*ptep)); purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(&pa_dbit_lock, flags); + spin_unlock_irqrestore(&pa_tlb_lock, flags); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h index 9d086a599fa0..e84b96478193 100644 --- a/arch/parisc/include/asm/tlbflush.h +++ b/arch/parisc/include/asm/tlbflush.h @@ -13,6 +13,9 @@ * active at any one time on the Merced bus. This tlb purge * synchronisation is fairly lightweight and harmless so we activate * it on all systems not just the N class. + + * It is also used to ensure PTE updates are atomic and consistent + * with the TLB. */ extern spinlock_t pa_tlb_lock; @@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *); #define smp_flush_tlb_all() flush_tlb_all() +int __flush_tlb_range(unsigned long sid, + unsigned long start, unsigned long end); + +#define flush_tlb_range(vma, start, end) \ + __flush_tlb_range((vma)->vm_mm->context, start, end) + +#define flush_tlb_kernel_range(start, end) \ + __flush_tlb_range(0, start, end) + /* * flush_tlb_mm() * - * XXX This code is NOT valid for HP-UX compatibility processes, - * (although it will probably work 99% of the time). HP-UX - * processes are free to play with the space id's and save them - * over long periods of time, etc. so we have to preserve the - * space and just flush the entire tlb. We need to check the - * personality in order to do that, but the personality is not - * currently being set correctly. - * - * Of course, Linux processes could do the same thing, but - * we don't support that (and the compilers, dynamic linker, - * etc. do not do that). + * The code to switch to a new context is NOT valid for processes + * which play with the space id's. Thus, we have to preserve the + * space and just flush the entire tlb. However, the compilers, + * dynamic linker, etc, do not manipulate space id's, so there + * could be a significant performance benefit in switching contexts + * and not flushing the whole tlb. */ static inline void flush_tlb_mm(struct mm_struct *mm) @@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm) BUG_ON(mm == &init_mm); /* Should never happen */ #if 1 || defined(CONFIG_SMP) + /* Except for very small threads, flushing the whole TLB is + * faster than using __flush_tlb_range. The pdtlb and pitlb + * instructions are very slow because of the TLB broadcast. + * It might be faster to do local range flushes on all CPUs + * on PA 2.0 systems. + */ flush_tlb_all(); #else /* FIXME: currently broken, causing space id and protection ids - * to go out of sync, resulting in faults on userspace accesses. + * to go out of sync, resulting in faults on userspace accesses. + * This approach needs further investigation since running many + * small applications (e.g., GCC testsuite) is faster on HP-UX. 
*/ if (mm) { if (mm->context != 0) @@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, { unsigned long flags, sid; - /* For one page, it's not worth testing the split_tlb variable */ - - mb(); sid = vma->vm_mm->context; purge_tlb_start(flags); mtsp(sid, 1); pdtlb(addr); - pitlb(addr); + if (unlikely(split_tlb)) + pitlb(addr); purge_tlb_end(flags); } - -void __flush_tlb_range(unsigned long sid, - unsigned long start, unsigned long end); - -#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end) - -#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end) - #endif diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index f6448c7c62b5..cda6dbbe9842 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local); EXPORT_SYMBOL(flush_kernel_icache_range_asm); #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ -int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; +static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; + +#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */ +static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD; void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; - unsigned long size; + unsigned long size, start; alltime = mfctl(16); flush_data_cache(); @@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void) /* Racy, but if we see an intermediate value, it's ok too... */ parisc_cache_flush_threshold = size * alltime / rangetime; - parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); + parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); if (!parisc_cache_flush_threshold) parisc_cache_flush_threshold = FLUSH_THRESHOLD; if (parisc_cache_flush_threshold > cache_info.dc_size) parisc_cache_flush_threshold = cache_info.dc_size; - printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus()); + printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", + parisc_cache_flush_threshold/1024); + + /* calculate TLB flush threshold */ + + alltime = mfctl(16); + flush_tlb_all(); + alltime = mfctl(16) - alltime; + + size = PAGE_SIZE; + start = (unsigned long) _text; + rangetime = mfctl(16); + while (start < (unsigned long) _end) { + flush_tlb_kernel_range(start, start + PAGE_SIZE); + start += PAGE_SIZE; + size += PAGE_SIZE; + } + rangetime = mfctl(16) - rangetime; + + printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", + alltime, size, rangetime); + + parisc_tlb_flush_threshold = size * alltime / rangetime; + parisc_tlb_flush_threshold *= num_online_cpus(); + parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); + if (!parisc_tlb_flush_threshold) + parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; + + printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", + parisc_tlb_flush_threshold/1024); } extern void purge_kernel_dcache_page_asm(unsigned long); @@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, } EXPORT_SYMBOL(copy_user_page); -void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) -{ - unsigned long flags; - - /* Note: purge_tlb_entries can be called at startup with - no context. 
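
(parisc_setup_cache_timing() in the arch/parisc/kernel/cache.c hunk nearby times one flush_tlb_all() against per-page flush_tlb_kernel_range() calls over the kernel image, derives a break-even size, scales it by the number of online CPUs and page-aligns it; __flush_tlb_range() then falls back to a full flush once a range reaches that threshold. A userspace sketch of the arithmetic, with invented cycle counts:)

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	/* Invented measurements: cycles for one full TLB flush versus
	 * flushing 'size' bytes page by page with broadcast purges. */
	unsigned long long alltime     = 2000;
	unsigned long long size        = 512 * PAGE_SIZE;
	unsigned long long rangetime   = 40000;
	unsigned long long online_cpus = 4;

	unsigned long long threshold = size * alltime / rangetime;	/* break-even bytes */
	threshold *= online_cpus;
	threshold = (threshold + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* PAGE_ALIGN */

	printf("TLB flush threshold: %llu kB\n", threshold / 1024);

	/* __flush_tlb_range() style decision for a sample 3 MB range: */
	unsigned long long range = 3 * 1024 * 1024;
	printf("%s\n", range >= threshold ? "flush_tlb_all()"
					  : "per-page pdtlb/pitlb purge");
	return 0;
}
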
*/ - - purge_tlb_start(flags); - mtsp(mm->context, 1); - pdtlb(addr); - pitlb(addr); - purge_tlb_end(flags); -} -EXPORT_SYMBOL(purge_tlb_entries); - -void __flush_tlb_range(unsigned long sid, unsigned long start, - unsigned long end) +/* __flush_tlb_range() + * + * returns 1 if all TLBs were flushed. + */ +int __flush_tlb_range(unsigned long sid, unsigned long start, + unsigned long end) { - unsigned long npages; + unsigned long flags, size; - npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */ + size = (end - start); + if (size >= parisc_tlb_flush_threshold) { flush_tlb_all(); - else { - unsigned long flags; + return 1; + } + /* Purge TLB entries for small ranges using the pdtlb and + pitlb instructions. These instructions execute locally + but cause a purge request to be broadcast to other TLBs. */ + if (likely(!split_tlb)) { + while (start < end) { + purge_tlb_start(flags); + mtsp(sid, 1); + pdtlb(start); + purge_tlb_end(flags); + start += PAGE_SIZE; + } + return 0; + } + + /* split TLB case */ + while (start < end) { purge_tlb_start(flags); mtsp(sid, 1); - if (split_tlb) { - while (npages--) { - pdtlb(start); - pitlb(start); - start += PAGE_SIZE; - } - } else { - while (npages--) { - pdtlb(start); - start += PAGE_SIZE; - } - } + pdtlb(start); + pitlb(start); purge_tlb_end(flags); + start += PAGE_SIZE; } + return 0; } static void cacheflush_h_tmp_function(void *dummy) diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 75819617f93b..c5ef4081b01d 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -45,7 +45,7 @@ .level 2.0 #endif - .import pa_dbit_lock,data + .import pa_tlb_lock,data /* space_to_prot macro creates a prot id from a space id */ @@ -420,8 +420,8 @@ SHLREG %r9,PxD_VALUE_SHIFT,\pmd extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ - shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd - LDREG %r0(\pmd),\pte /* pmd is now pte */ + shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ + LDREG %r0(\pmd),\pte bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault .endm @@ -453,57 +453,53 @@ L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_dbit_lock lock. */ - .macro dbit_lock spc,tmp,tmp1 + /* Acquire pa_tlb_lock lock and recheck page is still present. */ + .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault #ifdef CONFIG_SMP cmpib,COND(=),n 0,\spc,2f - load32 PA(pa_dbit_lock),\tmp + load32 PA(pa_tlb_lock),\tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop + LDREG 0(\ptp),\pte + bb,<,n \pte,_PAGE_PRESENT_BIT,2f + b \fault + stw \spc,0(\tmp) 2: #endif .endm - /* Release pa_dbit_lock lock without reloading lock address. */ - .macro dbit_unlock0 spc,tmp + /* Release pa_tlb_lock lock without reloading lock address. */ + .macro tlb_unlock0 spc,tmp #ifdef CONFIG_SMP or,COND(=) %r0,\spc,%r0 stw \spc,0(\tmp) #endif .endm - /* Release pa_dbit_lock lock. */ - .macro dbit_unlock1 spc,tmp + /* Release pa_tlb_lock lock. */ + .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP - load32 PA(pa_dbit_lock),\tmp - dbit_unlock0 \spc,\tmp + load32 PA(pa_tlb_lock),\tmp + tlb_unlock0 \spc,\tmp #endif .endm /* Set the _PAGE_ACCESSED bit of the PTE. 
Be clever and * don't needlessly dirty the cache line if it was already set */ - .macro update_ptep spc,ptep,pte,tmp,tmp1 -#ifdef CONFIG_SMP - or,COND(=) %r0,\spc,%r0 - LDREG 0(\ptep),\pte -#endif + .macro update_accessed ptp,pte,tmp,tmp1 ldi _PAGE_ACCESSED,\tmp1 or \tmp1,\pte,\tmp and,COND(<>) \tmp1,\pte,%r0 - STREG \tmp,0(\ptep) + STREG \tmp,0(\ptp) .endm /* Set the dirty bit (and accessed bit). No need to be * clever, this is only used from the dirty fault */ - .macro update_dirty spc,ptep,pte,tmp -#ifdef CONFIG_SMP - or,COND(=) %r0,\spc,%r0 - LDREG 0(\ptep),\pte -#endif + .macro update_dirty ptp,pte,tmp ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp or \tmp,\pte,\pte - STREG \pte,0(\ptep) + STREG \pte,0(\ptp) .endm /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) @@ -1148,14 +1144,14 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1174,14 +1170,14 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1202,20 +1198,20 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 + update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ mtsp spc,%sr1 idtlba pte,(%sr1,va) idtlbp prot,(%sr1,va) - mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 + mtsp t1, %sr1 /* Restore sr1 */ + tlb_unlock1 spc,t0 rfir nop @@ -1235,21 +1231,20 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 + update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot - - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ mtsp spc,%sr1 idtlba pte,(%sr1,va) idtlbp prot,(%sr1,va) - mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 + mtsp t1, %sr1 /* Restore sr1 */ + tlb_unlock1 spc,t0 rfir nop @@ -1269,16 +1264,16 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot - f_extend pte,t0 + f_extend pte,t1 idtlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1297,16 +1292,16 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot - f_extend pte,t0 + f_extend pte,t1 - idtlbt pte,prot - dbit_unlock1 spc,t0 + idtlbt pte,prot + tlb_unlock1 spc,t0 rfir nop @@ -1406,14 +1401,14 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,itlb_fault + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot iitlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1430,14 
+1425,14 @@ naitlb_miss_20w: L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot iitlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1458,20 +1453,20 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,itlb_fault + update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ mtsp spc,%sr1 iitlba pte,(%sr1,va) iitlbp prot,(%sr1,va) - mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 + mtsp t1, %sr1 /* Restore sr1 */ + tlb_unlock1 spc,t0 rfir nop @@ -1482,20 +1477,20 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 + update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ mtsp spc,%sr1 iitlba pte,(%sr1,va) iitlbp prot,(%sr1,va) - mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 + mtsp t1, %sr1 /* Restore sr1 */ + tlb_unlock1 spc,t0 rfir nop @@ -1516,16 +1511,16 @@ itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,itlb_fault + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot - f_extend pte,t0 + f_extend pte,t1 iitlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1536,16 +1531,16 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 + update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot - f_extend pte,t0 + f_extend pte,t1 iitlbt pte,prot - dbit_unlock1 spc,t0 + tlb_unlock1 spc,t0 rfir nop @@ -1568,14 +1563,14 @@ dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault - dbit_lock spc,t0,t1 - update_dirty spc,ptp,pte,t1 + tlb_lock spc,ptp,pte,t0,t1,dbit_fault + update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot - dbit_unlock0 spc,t0 + tlb_unlock0 spc,t0 rfir nop #else @@ -1588,8 +1583,8 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault - dbit_lock spc,t0,t1 - update_dirty spc,ptp,pte,t1 + tlb_lock spc,ptp,pte,t0,t1,dbit_fault + update_dirty ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1600,8 +1595,8 @@ dbit_trap_11: idtlbp prot,(%sr1,va) mtsp t1, %sr1 /* Restore sr1 */ - dbit_unlock0 spc,t0 + tlb_unlock0 spc,t0 rfir nop @@ -1612,16 +1607,16 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault - dbit_lock spc,t0,t1 - update_dirty spc,ptp,pte,t1 + tlb_lock spc,ptp,pte,t0,t1,dbit_fault + update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot f_extend pte,t1 - idtlbt pte,prot - dbit_unlock0 spc,t0 + idtlbt pte,prot + tlb_unlock0 spc,t0 rfir nop #endif diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 6548fd1d2e62..b99b39f1da02 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -43,10 +43,6 @@ #include "../math-emu/math-emu.h" /* for handle_fpe() */ -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -DEFINE_SPINLOCK(pa_dbit_lock); -#endif - static void parisc_show_stack(struct task_struct *task, unsigned long *sp, struct pt_regs *regs); diff --git 
a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index ccde8f084ce4..112ccf497562 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -52,6 +52,22 @@ .text /* + * Used by threads when the lock bit of core_idle_state is set. + * Threads will spin in HMT_LOW until the lock bit is cleared. + * r14 - pointer to core_idle_state + * r15 - used to load contents of core_idle_state + */ + +core_idle_lock_held: + HMT_LOW +3: lwz r15,0(r14) + andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT + bne 3b + HMT_MEDIUM + lwarx r15,0,r14 + blr + +/* * Pass requested state in r3: * r3 - PNV_THREAD_NAP/SLEEP/WINKLE * @@ -150,6 +166,10 @@ power7_enter_nap_mode: ld r14,PACA_CORE_IDLE_STATE_PTR(r13) lwarx_loop1: lwarx r15,0,r14 + + andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT + bnel core_idle_lock_held + andc r15,r15,r7 /* Clear thread bit */ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS @@ -294,7 +314,7 @@ lwarx_loop2: * workaround undo code or resyncing timebase or restoring context * In either case loop until the lock bit is cleared. */ - bne core_idle_lock_held + bnel core_idle_lock_held cmpwi cr2,r15,0 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13) @@ -319,15 +339,6 @@ lwarx_loop2: isync b common_exit -core_idle_lock_held: - HMT_LOW -core_idle_lock_loop: - lwz r15,0(14) - andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT - bne core_idle_lock_loop - HMT_MEDIUM - b lwarx_loop2 - first_thread_in_subcore: /* First thread in subcore to wakeup */ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6530f1b8874d..37de90f8a845 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs) __this_cpu_inc(irq_stat.mce_exceptions); + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) handled = cur_cpu_spec->machine_check_early(regs); return handled; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 6d535973b200..a67c6d781c52 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) printk(KERN_ALERT "Unable to handle kernel paging request for " "instruction fetch\n"); break; + case 0x600: + printk(KERN_ALERT "Unable to handle kernel paging request for " + "unaligned access at address 0x%08lx\n", regs->dar); + break; default: printk(KERN_ALERT "Unable to handle kernel paging request for " "unknown fault\n"); diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index ec2eb20631d1..df956295c2a7 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str) if (!attr) return NULL; + sysfs_attr_init(&attr->attr.attr); + attr->var = str; attr->attr.attr.name = name; attr->attr.attr.mode = 0444; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 1ba6307be4db..11634fa7ab3c 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -166,7 +166,7 @@ static void spufs_prune_dir(struct dentry *dir) mutex_lock(&d_inode(dir)->i_mutex); list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { spin_lock(&dentry->d_lock); - if (!(d_unhashed(dentry)) && d_really_is_positive(dentry)) { + if (simple_positive(dentry)) { dget_dlock(dentry); __d_drop(dentry); 
spin_unlock(&dentry->d_lock); diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 4949ef0d9400..37f959bf392e 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) return elog; } -static void elog_work_fn(struct work_struct *work) +static irqreturn_t elog_event(int irq, void *data) { __be64 size; __be64 id; @@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work) rc = opal_get_elog_size(&id, &size, &type); if (rc != OPAL_SUCCESS) { pr_err("ELOG: OPAL log info read failed\n"); - return; + return IRQ_HANDLED; } elog_size = be64_to_cpu(size); @@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work) * entries. */ if (kset_find_obj(elog_kset, name)) - return; + return IRQ_HANDLED; create_elog_obj(log_id, elog_size, elog_type); -} - -static DECLARE_WORK(elog_work, elog_work_fn); -static irqreturn_t elog_event(int irq, void *data) -{ - schedule_work(&elog_work); return IRQ_HANDLED; } @@ -304,8 +298,8 @@ int __init opal_elog_init(void) return irq; } - rc = request_irq(irq, elog_event, - IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL); + rc = request_threaded_irq(irq, NULL, elog_event, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL); if (rc) { pr_err("%s: Can't request OPAL event irq (%d)\n", __func__, rc); diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 46cb3feb0a13..4ece8e40dd54 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file) static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) { size_t addr, size; + pgprot_t page_prot; int rc; pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n", @@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) if (!opal_prd_range_is_valid(addr, size)) return -EINVAL; - vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file, - vma->vm_pgoff, - size, vma->vm_page_prot)) - | _PAGE_SPECIAL); + page_prot = phys_mem_access_prot(file, vma->vm_pgoff, + size, vma->vm_page_prot); rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, - vma->vm_page_prot); + page_prot); return rc; } diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c index 2bc33674ebfc..87f9623ca805 100644 --- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c @@ -18,6 +18,7 @@ #include <linux/pci.h> #include <linux/semaphore.h> #include <asm/msi_bitmap.h> +#include <asm/ppc-pci.h> struct ppc4xx_hsta_msi { struct device *dev; diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index d3f896a35b98..b2e5902bd8f4 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -62,18 +62,13 @@ static void hypfs_add_dentry(struct dentry *dentry) hypfs_last_dentry = dentry; } -static inline int hypfs_positive(struct dentry *dentry) -{ - return d_really_is_positive(dentry) && !d_unhashed(dentry); -} - static void hypfs_remove(struct dentry *dentry) { struct dentry *parent; parent = dentry->d_parent; mutex_lock(&d_inode(parent)->i_mutex); - if (hypfs_positive(dentry)) { + if (simple_positive(dentry)) { if (d_is_dir(dentry)) simple_rmdir(d_inode(parent), dentry); else @@ -456,8 +451,6 @@ static const struct super_operations 
hypfs_s_ops = { .show_options = hypfs_show_options, }; -static struct kobject *s390_kobj; - static int __init hypfs_init(void) { int rc; @@ -481,18 +474,16 @@ static int __init hypfs_init(void) rc = -ENODATA; goto fail_hypfs_sprp_exit; } - s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); - if (!s390_kobj) { - rc = -ENOMEM; + rc = sysfs_create_mount_point(hypervisor_kobj, "s390"); + if (rc) goto fail_hypfs_diag0c_exit; - } rc = register_filesystem(&hypfs_type); if (rc) goto fail_filesystem; return 0; fail_filesystem: - kobject_put(s390_kobj); + sysfs_remove_mount_point(hypervisor_kobj, "s390"); fail_hypfs_diag0c_exit: hypfs_diag0c_exit(); fail_hypfs_sprp_exit: @@ -510,7 +501,7 @@ fail_dbfs_exit: static void __exit hypfs_exit(void) { unregister_filesystem(&hypfs_type); - kobject_put(s390_kobj); + sysfs_remove_mount_point(hypervisor_kobj, "s390"); hypfs_diag0c_exit(); hypfs_sprp_exit(); hypfs_vm_exit(); diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 35d34635e4f1..402b9c85a894 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -332,7 +332,7 @@ static void describe_addr(struct KBacktraceIterator *kbt, } if (vma->vm_file) { - p = d_path(&vma->vm_file->f_path, buf, bufsize); + p = file_path(vma->vm_file, buf, bufsize); if (IS_ERR(p)) p = "?"; name = kbasename(p); diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c index 88c7016492c4..97bbb6060b25 100644 --- a/arch/tile/lib/memcpy_user_64.c +++ b/arch/tile/lib/memcpy_user_64.c @@ -28,7 +28,7 @@ #define _ST(p, inst, v) \ ({ \ asm("1: " #inst " %0, %1;" \ - ".pushsection .coldtext.memcpy,\"ax\";" \ + ".pushsection .coldtext,\"ax\";" \ "2: { move r0, %2; jrp lr };" \ ".section __ex_table,\"a\";" \ ".align 8;" \ @@ -41,7 +41,7 @@ ({ \ unsigned long __v; \ asm("1: " #inst " %0, %1;" \ - ".pushsection .coldtext.memcpy,\"ax\";" \ + ".pushsection .coldtext,\"ax\";" \ "2: { move r0, %2; jrp lr };" \ ".section __ex_table,\"a\";" \ ".align 8;" \ diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c index f7ddae3725a4..6225cc998db1 100644 --- a/arch/tile/mm/elf.c +++ b/arch/tile/mm/elf.c @@ -56,7 +56,7 @@ static int notify_exec(struct mm_struct *mm) if (exe_file == NULL) goto done_free; - path = d_path(&exe_file->f_path, buf, PAGE_SIZE); + path = file_path(exe_file, buf, PAGE_SIZE); if (IS_ERR(path)) goto done_put; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 55bced17dc95..3dbb7e7909ca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -254,6 +254,11 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y +config KASAN_SHADOW_OFFSET + hex + depends on KASAN + default 0xdffffc0000000000 + config HAVE_INTEL_TXT def_bool y depends on INTEL_IOMMU && ACPI @@ -2015,7 +2020,7 @@ config CMDLINE_BOOL To compile command line arguments into the kernel, set this option to 'Y', then fill in the - the boot arguments in CONFIG_CMDLINE. + boot arguments in CONFIG_CMDLINE. Systems with fully functional boot loaders (i.e. non-embedded) should leave this option set to 'N'. 
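The tile stack.c and elf.c hunks above, like the other conversions in this merge, replace open-coded d_path(&file->f_path, ...) calls with the file_path() helper. A minimal sketch of the equivalence those call sites rely on (illustrative only; this is not the helper's actual kernel definition, which lives in fs/):

#include <linux/dcache.h>
#include <linux/fs.h>

/* Sketch: file_path() is used above as a drop-in for d_path() on the
 * file's f_path. It returns a pointer into buf on success or an
 * ERR_PTR() on failure, which is why callers test IS_ERR() on it. */
static char *file_path_sketch(struct file *filp, char *buf, int buflen)
{
	return d_path(&filp->f_path, buf, buflen);
}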
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h index 99efebb2f69d..ca3ce9ab9385 100644 --- a/arch/x86/include/asm/espfix.h +++ b/arch/x86/include/asm/espfix.h @@ -9,7 +9,7 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); extern void init_espfix_bsp(void); -extern void init_espfix_ap(void); +extern void init_espfix_ap(int cpu); #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h new file mode 100644 index 000000000000..200ec2e7821d --- /dev/null +++ b/arch/x86/include/asm/intel_pmc_ipc.h @@ -0,0 +1,82 @@ +#ifndef _ASM_X86_INTEL_PMC_IPC_H_ +#define _ASM_X86_INTEL_PMC_IPC_H_ + +/* Commands */ +#define PMC_IPC_PMIC_ACCESS 0xFF +#define PMC_IPC_PMIC_ACCESS_READ 0x0 +#define PMC_IPC_PMIC_ACCESS_WRITE 0x1 +#define PMC_IPC_USB_PWR_CTRL 0xF0 +#define PMC_IPC_PMIC_BLACKLIST_SEL 0xEF +#define PMC_IPC_PHY_CONFIG 0xEE +#define PMC_IPC_NORTHPEAK_CTRL 0xED +#define PMC_IPC_PM_DEBUG 0xEC +#define PMC_IPC_PMC_TELEMTRY 0xEB +#define PMC_IPC_PMC_FW_MSG_CTRL 0xEA + +/* IPC return code */ +#define IPC_ERR_NONE 0 +#define IPC_ERR_CMD_NOT_SUPPORTED 1 +#define IPC_ERR_CMD_NOT_SERVICED 2 +#define IPC_ERR_UNABLE_TO_SERVICE 3 +#define IPC_ERR_CMD_INVALID 4 +#define IPC_ERR_CMD_FAILED 5 +#define IPC_ERR_EMSECURITY 6 +#define IPC_ERR_UNSIGNEDKERNEL 7 + +#if IS_ENABLED(CONFIG_INTEL_PMC_IPC) + +/* + * intel_pmc_ipc_simple_command + * @cmd: command + * @sub: sub type + */ +int intel_pmc_ipc_simple_command(int cmd, int sub); + +/* + * intel_pmc_ipc_raw_cmd + * @cmd: command + * @sub: sub type + * @in: input data + * @inlen: input length in bytes + * @out: output data + * @outlen: output length in dwords + * @sptr: data writing to SPTR register + * @dptr: data writing to DPTR register + */ +int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, + u32 *out, u32 outlen, u32 dptr, u32 sptr); + +/* + * intel_pmc_ipc_command + * @cmd: command + * @sub: sub type + * @in: input data + * @inlen: input length in bytes + * @out: output data + * @outlen: output length in dwords + */ +int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, + u32 *out, u32 outlen); + +#else + +static inline int intel_pmc_ipc_simple_command(int cmd, int sub) +{ + return -EINVAL; +} + +static inline int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, + u32 *out, u32 outlen, u32 dptr, u32 sptr) +{ + return -EINVAL; +} + +static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, + u32 *out, u32 outlen) +{ + return -EINVAL; +} + +#endif /*CONFIG_INTEL_PMC_IPC*/ + +#endif diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index 8b22422fbad8..74a2a8dc9908 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -14,15 +14,11 @@ #ifndef __ASSEMBLY__ -extern pte_t kasan_zero_pte[]; -extern pte_t kasan_zero_pmd[]; -extern pte_t kasan_zero_pud[]; - #ifdef CONFIG_KASAN -void __init kasan_map_early_shadow(pgd_t *pgd); +void __init kasan_early_init(void); void __init kasan_init(void); #else -static inline void kasan_map_early_shadow(pgd_t *pgd) { } +static inline void kasan_early_init(void) { } static inline void kasan_init(void) { } #endif diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c7fa57b529d2..2a7f5d782c33 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -607,7 +607,7 @@ struct kvm_arch { struct kvm_pic *vpic; struct kvm_ioapic *vioapic; 
struct kvm_pit *vpit; - int vapics_in_nmi_mode; + atomic_t vapics_in_nmi_mode; struct mutex apic_map_lock; struct kvm_apic_map *apic_map; diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index ce6068dbcfbc..8fba544e9cc4 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -199,6 +199,17 @@ #define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 #define HV_X64_MSR_STIMER3_COUNT 0x400000B7 +/* Hyper-V guest crash notification MSR's */ +#define HV_X64_MSR_CRASH_P0 0x40000100 +#define HV_X64_MSR_CRASH_P1 0x40000101 +#define HV_X64_MSR_CRASH_P2 0x40000102 +#define HV_X64_MSR_CRASH_P3 0x40000103 +#define HV_X64_MSR_CRASH_P4 0x40000104 +#define HV_X64_MSR_CRASH_CTL 0x40000105 +#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63) +#define HV_X64_MSR_CRASH_PARAMS \ + (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) + #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 28eba2d38b15..f813261d9740 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -409,12 +409,6 @@ static void __setup_vector_irq(int cpu) int irq, vector; struct apic_chip_data *data; - /* - * vector_lock will make sure that we don't run into irq vector - * assignments that might be happening on another cpu in parallel, - * while we setup our initial vector to irq mappings. - */ - raw_spin_lock(&vector_lock); /* Mark the inuse vectors */ for_each_active_irq(irq) { data = apic_chip_data(irq_get_irq_data(irq)); @@ -436,16 +430,16 @@ static void __setup_vector_irq(int cpu) if (!cpumask_test_cpu(cpu, data->domain)) per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; } - raw_spin_unlock(&vector_lock); } /* - * Setup the vector to irq mappings. + * Setup the vector to irq mappings. Must be called with vector_lock held. */ void setup_vector_irq(int cpu) { int irq; + lockdep_assert_held(&vector_lock); /* * On most of the platforms, legacy PIC delivers the interrupts on the * boot cpu. 
But there are certain platforms where PIC interrupts are diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9fc5e3d9d9c8..922c5e0cea4c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -742,7 +742,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) cpu_detect(c); get_cpu_vendor(c); get_cpu_cap(c); - fpu__init_system(c); if (this_cpu->c_early_init) this_cpu->c_early_init(c); @@ -754,6 +753,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) this_cpu->c_bsp_init(c); setup_force_cpu_cap(X86_FEATURE_ALWAYS); + fpu__init_system(c); } void __init early_cpu_init(void) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5801a14f7524..3658de47900f 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -357,34 +357,24 @@ void x86_release_hardware(void) */ int x86_add_exclusive(unsigned int what) { - int ret = -EBUSY, i; - - if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) - return 0; + int i; - mutex_lock(&pmc_reserve_mutex); - for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { - if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) - goto out; + if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { + mutex_lock(&pmc_reserve_mutex); + for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { + if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) + goto fail_unlock; + } + atomic_inc(&x86_pmu.lbr_exclusive[what]); + mutex_unlock(&pmc_reserve_mutex); } - atomic_inc(&x86_pmu.lbr_exclusive[what]); - ret = 0; + atomic_inc(&active_events); + return 0; -out: +fail_unlock: mutex_unlock(&pmc_reserve_mutex); - - /* - * Assuming that all exclusive events will share the PMI handler - * (which checks active_events for whether there is work to do), - * we can bump active_events counter right here, except for - * x86_lbr_exclusive_lbr events that go through x86_pmu_event_init() - * path, which already bumps active_events for them. - */ - if (!ret && what != x86_lbr_exclusive_lbr) - atomic_inc(&active_events); - - return ret; + return -EBUSY; } void x86_del_exclusive(unsigned int what) diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 89427d8d4fc5..eec40f595ab9 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -175,7 +175,9 @@ static __init void early_serial_init(char *s) } if (*s) { - if (kstrtoul(s, 0, &baud) < 0 || baud == 0) + baud = simple_strtoull(s, &e, 0); + + if (baud == 0 || s == e) baud = DEFAULT_BAUD; } diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index f5d0730e7b08..ce95676abd60 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -131,25 +131,24 @@ void __init init_espfix_bsp(void) init_espfix_random(); /* The rest is the same as for any other processor */ - init_espfix_ap(); + init_espfix_ap(0); } -void init_espfix_ap(void) +void init_espfix_ap(int cpu) { - unsigned int cpu, page; + unsigned int page; unsigned long addr; pud_t pud, *pud_p; pmd_t pmd, *pmd_p; pte_t pte, *pte_p; - int n; + int n, node; void *stack_page; pteval_t ptemask; /* We only have to do this once... 
*/ - if (likely(this_cpu_read(espfix_stack))) + if (likely(per_cpu(espfix_stack, cpu))) return; /* Already initialized */ - cpu = smp_processor_id(); addr = espfix_base_addr(cpu); page = cpu/ESPFIX_STACKS_PER_PAGE; @@ -165,12 +164,15 @@ void init_espfix_ap(void) if (stack_page) goto unlock_done; + node = cpu_to_node(cpu); ptemask = __supported_pte_mask; pud_p = &espfix_pud_page[pud_index(addr)]; pud = *pud_p; if (!pud_present(pud)) { - pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); + struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); + + pmd_p = (pmd_t *)page_address(page); pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PUD_CLONES; n++) @@ -180,7 +182,9 @@ void init_espfix_ap(void) pmd_p = pmd_offset(&pud, addr); pmd = *pmd_p; if (!pmd_present(pmd)) { - pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); + struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); + + pte_p = (pte_t *)page_address(page); pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PMD_CLONES; n++) @@ -188,7 +192,7 @@ void init_espfix_ap(void) } pte_p = pte_offset_kernel(&pmd, addr); - stack_page = (void *)__get_free_page(GFP_KERNEL); + stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); for (n = 0; n < ESPFIX_PTE_CLONES; n++) set_pte(&pte_p[n*PTE_STRIDE], pte); @@ -199,7 +203,7 @@ void init_espfix_ap(void) unlock_done: mutex_unlock(&espfix_init_mutex); done: - this_cpu_write(espfix_stack, addr); - this_cpu_write(espfix_waddr, (unsigned long)stack_page - + (addr & ~PAGE_MASK)); + per_cpu(espfix_stack, cpu) = addr; + per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page + + (addr & ~PAGE_MASK); } diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index fc878fee6a51..32826791e675 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -95,11 +95,12 @@ static void __init fpu__init_system_mxcsr(void) unsigned int mask = 0; if (cpu_has_fxsr) { - struct fxregs_state fx_tmp __aligned(32) = { }; + /* Static because GCC does not get 16-byte stack alignment right: */ + static struct fxregs_state fxregs __initdata; - asm volatile("fxsave %0" : "+m" (fx_tmp)); + asm volatile("fxsave %0" : "+m" (fxregs)); - mask = fx_tmp.mxcsr_mask; + mask = fxregs.mxcsr_mask; /* * If zero then use the default features mask, diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 5a4668136e98..f129a9af6357 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) /* Kill off the identity-map trampoline */ reset_early_page_tables(); - kasan_map_early_shadow(early_level4_pgt); - - /* clear bss before set_intr_gate with early_idt_handler */ clear_bss(); + clear_page(init_level4_pgt); + + kasan_early_init(); + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) set_intr_gate(i, early_idt_handler_array[i]); load_idt((const struct desc_ptr *)&idt_descr); @@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) */ load_ucode_bsp(); - clear_page(init_level4_pgt); /* set init_level4_pgt kernel high mapping*/ init_level4_pgt[511] = early_level4_pgt[511]; - kasan_map_early_shadow(init_level4_pgt); - x86_64_start_reservations(real_mode_data); } diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 
e5c27f729a38..1d40ca8a73f2 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -516,38 +516,9 @@ ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 -#ifdef CONFIG_KASAN -#define FILL(VAL, COUNT) \ - .rept (COUNT) ; \ - .quad (VAL) ; \ - .endr - -NEXT_PAGE(kasan_zero_pte) - FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512) -NEXT_PAGE(kasan_zero_pmd) - FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512) -NEXT_PAGE(kasan_zero_pud) - FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512) - -#undef FILL -#endif - - #include "../../x86/xen/xen-head.S" __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE -#ifdef CONFIG_KASAN -/* - * This page used as early shadow. We don't use empty_zero_page - * at early stages, stack instrumentation could write some garbage - * to this page. - * Latter we reuse it as zero shadow for large ranges of memory - * that allowed to access, but not instrumented by kasan - * (vmalloc/vmemmap ...). - */ -NEXT_PAGE(kasan_zero_page) - .skip PAGE_SIZE -#endif diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 88b366487b0e..c7dfe1be784e 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void) if (!desc) continue; + /* + * Protect against concurrent action removal, + * affinity changes etc. + */ + raw_spin_lock(&desc->lock); data = irq_desc_get_irq_data(desc); cpumask_copy(&affinity_new, data->affinity); cpumask_clear_cpu(this_cpu, &affinity_new); /* Do not count inactive or per-cpu irqs. */ - if (!irq_has_action(irq) || irqd_is_per_cpu(data)) + if (!irq_has_action(irq) || irqd_is_per_cpu(data)) { + raw_spin_unlock(&desc->lock); continue; + } + raw_spin_unlock(&desc->lock); /* * A single irq may be mapped to multiple * cpu's vector_irq[] (for example IOAPIC cluster @@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void) * vector. If the vector is marked in the used vectors * bitmap or an irq is assigned to it, we don't count * it as available. + * + * As this is an inaccurate snapshot anyway, we can do + * this w/o holding vector_lock. */ for (vector = FIRST_EXTERNAL_VECTOR; vector < first_system_vector; vector++) { @@ -486,6 +497,11 @@ void fixup_irqs(void) */ mdelay(1); + /* + * We can walk the vector array of this cpu without holding + * vector_lock because the cpu is already marked !online, so + * nothing else will touch it. 
+ */ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { unsigned int irr; @@ -497,9 +513,9 @@ void fixup_irqs(void) irq = __this_cpu_read(vector_irq[vector]); desc = irq_to_desc(irq); + raw_spin_lock(&desc->lock); data = irq_desc_get_irq_data(desc); chip = irq_data_get_irq_chip(data); - raw_spin_lock(&desc->lock); if (chip->irq_retrigger) { chip->irq_retrigger(data); __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3b95b89e9b2..80f874bf999e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -461,19 +461,18 @@ static void __init e820_reserve_setup_data(void) { struct setup_data *data; u64 pa_data; - int found = 0; pa_data = boot_params.hdr.setup_data; + if (!pa_data) + return; + while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); e820_update_range(pa_data, sizeof(*data)+data->len, E820_RAM, E820_RESERVED_KERN); - found = 1; pa_data = data->next; early_memunmap(data, sizeof(*data)); } - if (!found) - return; sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); memcpy(&e820_saved, &e820, sizeof(struct e820map)); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8add66b22f33..d3010aa79daf 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -171,11 +171,6 @@ static void smp_callin(void) apic_ap_setup(); /* - * Need to setup vector mappings before we enable interrupts. - */ - setup_vector_irq(smp_processor_id()); - - /* * Save our processor parameters. Note: this information * is needed for clock calibration. */ @@ -239,18 +234,13 @@ static void notrace start_secondary(void *unused) check_tsc_sync_target(); /* - * Enable the espfix hack for this CPU - */ -#ifdef CONFIG_X86_ESPFIX64 - init_espfix_ap(); -#endif - - /* - * We need to hold vector_lock so there the set of online cpus - * does not change while we are assigning vectors to cpus. Holding - * this lock ensures we don't half assign or remove an irq from a cpu. + * Lock vector_lock and initialize the vectors on this cpu + * before setting the cpu online. We must set it online with + * vector_lock held to prevent a concurrent setup/teardown + * from seeing a half valid vector space. */ lock_vector_lock(); + setup_vector_irq(smp_processor_id()); set_cpu_online(smp_processor_id(), true); unlock_vector_lock(); cpu_set_state_online(smp_processor_id()); @@ -854,6 +844,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) initial_code = (unsigned long)start_secondary; stack_start = idle->thread.sp; + /* + * Enable the espfix hack for this CPU + */ +#ifdef CONFIG_X86_ESPFIX64 + init_espfix_ap(cpu); +#endif + /* So we see what's up */ announce_cpu(cpu, apicid); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 505449700e0c..7437b41f6a47 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -598,10 +598,19 @@ static unsigned long quick_pit_calibrate(void) if (!pit_expect_msb(0xff-i, &delta, &d2)) break; + delta -= tsc; + + /* + * Extrapolate the error and fail fast if the error will + * never be below 500 ppm. 
+ */ + if (i == 1 && + d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11) + return 0; + /* * Iterate until the error is less than 500 ppm */ - delta -= tsc; if (d1+d2 >= delta >> 11) continue; diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 4dce6f8b6129..f90952f64e79 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work) * LVT0 to NMI delivery. Other PIC interrupts are just sent to * VCPU0, and only if its LVT0 is in EXTINT mode. */ - if (kvm->arch.vapics_in_nmi_mode > 0) + if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0) kvm_for_each_vcpu(i, vcpu, kvm) kvm_apic_nmi_wd_deliver(vcpu); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 36e9de1b4127..954e98a8c2e3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1257,16 +1257,17 @@ static void start_apic_timer(struct kvm_lapic *apic) static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) { - int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0)); + bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val); - if (apic_lvt_nmi_mode(lvt0_val)) { - if (!nmi_wd_enabled) { + if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) { + apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode; + if (lvt0_in_nmi_mode) { apic_debug("Receive NMI setting on APIC_LVT0 " "for cpu %d\n", apic->vcpu->vcpu_id); - apic->vcpu->kvm->arch.vapics_in_nmi_mode++; - } - } else if (nmi_wd_enabled) - apic->vcpu->kvm->arch.vapics_in_nmi_mode--; + atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); + } else + atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); + } } static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) @@ -1597,6 +1598,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) apic_set_reg(apic, APIC_LVT0, SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); + apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); apic_set_reg(apic, APIC_DFR, 0xffffffffU); apic_set_spiv(apic, 0xff); @@ -1822,6 +1824,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, apic_update_ppr(apic); hrtimer_cancel(&apic->lapic_timer.timer); apic_update_lvtt(apic); + apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index f2f4e10ab772..71952748222a 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -26,6 +26,7 @@ struct kvm_lapic { struct kvm_vcpu *vcpu; bool sw_enabled; bool irr_pending; + bool lvt0_in_nmi_mode; /* Number of bits set in ISR. */ s16 isr_count; /* The highest vector set in ISR; if -1 - invalid, must scan ISR. 
*/ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ac165c2fb8e5..bbaf44e8f0d3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2379,8 +2379,6 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { - u64 data; - switch (msr_info->index) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: @@ -2453,7 +2451,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* TSC increment by tick */ msr_info->data = 1000ULL; /* CPU multiplier */ - data |= (((uint64_t)4ULL) << 40); + msr_info->data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: msr_info->data = vcpu->arch.efer; diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index ddf9ecb53cc3..e342586db6e4 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) unsigned long ret; if (__range_not_ok(from, n, TASK_SIZE)) - return 0; + return n; /* * Even though this function is typically called from NMI/IRQ context diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 4860906c6b9f..e1840f3db5b5 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -1,3 +1,4 @@ +#define pr_fmt(fmt) "kasan: " fmt #include <linux/bootmem.h> #include <linux/kasan.h> #include <linux/kdebug.h> @@ -11,7 +12,19 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern struct range pfn_mapped[E820_X_MAX]; -extern unsigned char kasan_zero_page[PAGE_SIZE]; +static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; +static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; +static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; + +/* + * This page used as early shadow. We don't use empty_zero_page + * at early stages, stack instrumentation could write some garbage + * to this page. + * Latter we reuse it as zero shadow for large ranges of memory + * that allowed to access, but not instrumented by kasan + * (vmalloc/vmemmap ...). 
+ */ +static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; static int __init map_range(struct range *range) { @@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start, pgd_clear(pgd_offset_k(start)); } -void __init kasan_map_early_shadow(pgd_t *pgd) +static void __init kasan_map_early_shadow(pgd_t *pgd) { int i; unsigned long start = KASAN_SHADOW_START; @@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr, while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) { WARN_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) - | __PAGE_KERNEL_RO)); + | _KERNPG_TABLE)); addr += PMD_SIZE; pmd = pmd_offset(pud, addr); } @@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr, while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) { WARN_ON(!pud_none(*pud)); set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd) - | __PAGE_KERNEL_RO)); + | _KERNPG_TABLE)); addr += PUD_SIZE; pud = pud_offset(pgd, addr); } @@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end) while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) { WARN_ON(!pgd_none(*pgd)); set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud) - | __PAGE_KERNEL_RO)); + | _KERNPG_TABLE)); addr += PGDIR_SIZE; pgd = pgd_offset_k(addr); } @@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = { }; #endif +void __init kasan_early_init(void) +{ + int i; + pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL; + pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE; + pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE; + + for (i = 0; i < PTRS_PER_PTE; i++) + kasan_zero_pte[i] = __pte(pte_val); + + for (i = 0; i < PTRS_PER_PMD; i++) + kasan_zero_pmd[i] = __pmd(pmd_val); + + for (i = 0; i < PTRS_PER_PUD; i++) + kasan_zero_pud[i] = __pud(pud_val); + + kasan_map_early_shadow(early_level4_pgt); + kasan_map_early_shadow(init_level4_pgt); +} + void __init kasan_init(void) { int i; @@ -176,6 +209,7 @@ void __init kasan_init(void) memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt)); load_cr3(early_level4_pgt); + __flush_tlb_all(); clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); @@ -202,5 +236,8 @@ void __init kasan_init(void) memset(kasan_zero_page, 0, PAGE_SIZE); load_cr3(init_level4_pgt); + __flush_tlb_all(); init_task.kasan_depth = 0; + + pr_info("Kernel address sanitizer initialized\n"); } |
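For orientation on the KASAN pieces of this merge (the CONFIG_KASAN_SHADOW_OFFSET default added to arch/x86/Kconfig and the zero-shadow page tables now built in kasan_early_init() above): x86-64 KASAN keeps one shadow byte per 8 bytes of kernel address space, and the Kconfig value is the additive offset of that mapping. A minimal sketch of the translation those shadow tables back, assuming the usual scale shift of 3 (the formula itself is not part of this patch):

/* Sketch only: shadow address = (addr >> 3) + offset. Until kasan_init()
 * runs, every shadow address computed this way resolves, through the
 * kasan_zero_{pud,pmd,pte} tables populated above, to the single
 * kasan_zero_page. */
#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_OFFSET_SKETCH	0xdffffc0000000000UL

static inline unsigned long kasan_shadow_addr_sketch(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET_SKETCH;
}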