diff options
author | Alexander Graf <agraf@suse.de> | 2013-08-29 02:41:59 +0400 |
---|---|---|
committer | Alexander Graf <agraf@suse.de> | 2013-08-29 02:41:59 +0400 |
commit | bf550fc93d9855872a95e69e4002256110d89858 (patch) | |
tree | 10876bb4304bffe54c4160a132e7b8de6577ac4e /arch | |
parent | 7e48c101e0c53e6095c5f4f5e63d14df50aae8fc (diff) | |
parent | cc2df20c7c4ce594c3e17e9cc260c330646012c8 (diff) | |
download | linux-bf550fc93d9855872a95e69e4002256110d89858.tar.xz |
Merge remote-tracking branch 'origin/next' into kvm-ppc-next
Conflicts:
mm/Kconfig
CMA DMA split and ZSWAP introduction were conflicting, fix up manually.
Diffstat (limited to 'arch')
901 files changed, 14202 insertions, 11906 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index a4429bcd609e..8d2ae24b9f4a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -365,6 +365,9 @@ config HAVE_IRQ_TIME_ACCOUNTING config HAVE_ARCH_TRANSPARENT_HUGEPAGE bool +config HAVE_ARCH_SOFT_DIRTY + bool + config HAVE_MOD_ARCH_SPECIFIC bool help diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h index c5b5d6bac9ed..14ce27bccd24 100644 --- a/arch/alpha/include/asm/mmzone.h +++ b/arch/alpha/include/asm/mmzone.h @@ -71,8 +71,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) - #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32)) #define pte_pfn(pte) (pte_val(pte) >> 32) diff --git a/arch/alpha/include/uapi/asm/fcntl.h b/arch/alpha/include/uapi/asm/fcntl.h index dfdadb0b4bef..09f49a6b87d1 100644 --- a/arch/alpha/include/uapi/asm/fcntl.h +++ b/arch/alpha/include/uapi/asm/fcntl.h @@ -32,7 +32,7 @@ #define O_SYNC (__O_SYNC|O_DSYNC) #define O_PATH 040000000 -#define O_TMPFILE 0100000000 +#define __O_TMPFILE 0100000000 #define F_GETLK 7 #define F_SETLK 8 diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index eee6ea76bdaf..467de010ea7e 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -81,4 +81,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 1d4aabfcf9a1..837c0fa58317 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -238,8 +238,8 @@ nautilus_init_pci(void) if (pci_mem < memtop) memtop = pci_mem; if (memtop > alpha_mv.min_mem_address) { - free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address), - (unsigned long)__va(memtop), 0, NULL); + free_reserved_area(__va(alpha_mv.min_mem_address), + __va(memtop), -1, NULL); printk("nautilus_init_pci: %ldk freed\n", (memtop - alpha_mv.min_mem_address) >> 10); } diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 0ba85ee4a466..a1bea91df56a 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -276,56 +276,25 @@ srm_paging_stop (void) } #endif -#ifndef CONFIG_DISCONTIGMEM -static void __init -printk_memory_info(void) -{ - unsigned long codesize, reservedpages, datasize, initsize, tmp; - extern int page_is_ram(unsigned long) __init; - - /* printk all informations */ - reservedpages = 0; - for (tmp = 0; tmp < max_low_pfn; tmp++) - /* - * Only count reserved RAM pages - */ - if (page_is_ram(tmp) && PageReserved(mem_map+tmp)) - reservedpages++; - - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_data; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - max_mapnr << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10); -} - void __init mem_init(void) { - max_mapnr = num_physpages = max_low_pfn; - totalram_pages += free_all_bootmem(); + set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - - printk_memory_info(); + free_all_bootmem(); + mem_init_print_info(NULL); } -#endif /* CONFIG_DISCONTIGMEM */ void free_initmem(void) 
{ - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 33885048fa36..d543d71c28b4 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -129,8 +129,6 @@ setup_memory_node(int nid, void *kernel_end) if (node_max_pfn > max_low_pfn) max_pfn = max_low_pfn = node_max_pfn; - num_physpages += node_max_pfn - node_min_pfn; - #if 0 /* we'll try this one again in a little while */ /* Cute trick to make sure our local node data is on local memory */ node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); @@ -321,41 +319,3 @@ void __init paging_init(void) /* Initialize the kernel's ZERO_PGE. */ memset((void *)ZERO_PGE, 0, PAGE_SIZE); } - -void __init mem_init(void) -{ - unsigned long codesize, reservedpages, datasize, initsize, pfn; - extern int page_is_ram(unsigned long) __init; - unsigned long nid, i; - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - - reservedpages = 0; - for_each_online_node(nid) { - /* - * This will free up the bootmem, ie, slot 0 memory - */ - totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); - - pfn = NODE_DATA(nid)->node_start_pfn; - for (i = 0; i < node_spanned_pages(nid); i++, pfn++) - if (page_is_ram(pfn) && - PageReserved(nid_page_nr(nid, i))) - reservedpages++; - } - - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_data; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, " - "%luk data, %luk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10); -#if 0 - mem_stress(); -#endif -} diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 4a0e54fc01b2..68fcbb2d59e2 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -34,6 +34,7 @@ config ARC select OF select OF_EARLY_FLATTREE select PERF_USE_VMALLOC + select HAVE_DEBUG_STACKOVERFLOW config SCHED_OMIT_FRAME_POINTER def_bool y diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug index 962c6099659e..a7fc0da25650 100644 --- a/arch/arc/Kconfig.debug +++ b/arch/arc/Kconfig.debug @@ -15,13 +15,6 @@ config EARLY_PRINTK with klogd/syslogd or the X server. You should normally N here, unless you want to debug such a crash. -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit. 
- config 16KSTACKS bool "Use 16Kb for kernel stacks instead of 8Kb" help diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi index 941ad118a7e7..d9f8249aa66e 100644 --- a/arch/arc/boot/dts/abilis_tb100.dtsi +++ b/arch/arc/boot/dts/abilis_tb100.dtsi @@ -21,10 +21,6 @@ /include/ "abilis_tb10x.dtsi" -/* interrupt specifiers - * -------------------- - * 0: rising, 1: low, 2: high, 3: falling, - */ / { clock-frequency = <500000000>; /* 500 MHZ */ @@ -173,7 +169,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF140000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -185,7 +181,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF141000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -197,7 +193,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF142000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -209,7 +205,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF143000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -221,7 +217,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF144000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -233,7 +229,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF145000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -245,7 +241,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF146000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -257,7 +253,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF147000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -269,7 +265,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF148000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -281,7 +277,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF149000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -293,7 +289,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14A000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -305,7 +301,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14B000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -317,7 +313,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14C000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -329,7 +325,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14D000 0x1000>; gpio-controller; #gpio-cells = <1>; diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts index 0fa0d4abe795..ebc313a9f5b2 100644 --- 
a/arch/arc/boot/dts/abilis_tb100_dvk.dts +++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts @@ -45,19 +45,19 @@ }; i2c0: i2c@FF120000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c1: i2c@FF121000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c2: i2c@FF122000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c3: i2c@FF123000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c4: i2c@FF124000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; leds { diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi index fd25c212049f..da8ca7941e67 100644 --- a/arch/arc/boot/dts/abilis_tb101.dtsi +++ b/arch/arc/boot/dts/abilis_tb101.dtsi @@ -21,10 +21,6 @@ /include/ "abilis_tb10x.dtsi" -/* interrupt specifiers - * -------------------- - * 0: rising, 1: low, 2: high, 3: falling, - */ / { clock-frequency = <500000000>; /* 500 MHZ */ @@ -182,7 +178,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF140000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -194,7 +190,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF141000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -206,7 +202,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF142000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -218,7 +214,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF143000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -230,7 +226,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF144000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -242,7 +238,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF145000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -254,7 +250,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF146000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -266,7 +262,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF147000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -278,7 +274,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF148000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -290,7 +286,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF149000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -302,7 +298,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14A000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -314,7 +310,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14B000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -326,7 +322,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = 
<27 1>; + interrupts = <27 2>; reg = <0xFF14C000 0x1000>; gpio-controller; #gpio-cells = <1>; @@ -338,7 +334,7 @@ interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&tb10x_ictl>; - interrupts = <27 1>; + interrupts = <27 2>; reg = <0xFF14D000 0x1000>; gpio-controller; #gpio-cells = <1>; diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts index a4d80ce283ae..b204657993aa 100644 --- a/arch/arc/boot/dts/abilis_tb101_dvk.dts +++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts @@ -45,19 +45,19 @@ }; i2c0: i2c@FF120000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c1: i2c@FF121000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c2: i2c@FF122000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c3: i2c@FF123000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; i2c4: i2c@FF124000 { - sda-hold-time = <432>; + i2c-sda-hold-time-ns = <432>; }; leds { diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi index b97e3051ba4b..edf56f4749e1 100644 --- a/arch/arc/boot/dts/abilis_tb10x.dtsi +++ b/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -19,10 +19,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* interrupt specifiers - * -------------------- - * 0: rising, 1: low, 2: high, 3: falling, - */ / { compatible = "abilis,arc-tb10x"; @@ -78,7 +74,7 @@ #interrupt-cells = <1>; }; tb10x_ictl: pic@fe002000 { - compatible = "abilis,tb10x_ictl"; + compatible = "abilis,tb10x-ictl"; reg = <0xFE002000 0x20>; interrupt-controller; #interrupt-cells = <2>; @@ -91,7 +87,7 @@ compatible = "snps,dw-apb-uart"; reg = <0xFF100000 0x100>; clock-frequency = <166666666>; - interrupts = <25 1>; + interrupts = <25 8>; reg-shift = <2>; reg-io-width = <4>; interrupt-parent = <&tb10x_ictl>; @@ -100,7 +96,7 @@ compatible = "snps,dwmac-3.70a","snps,dwmac"; reg = <0xFE100000 0x1058>; interrupt-parent = <&tb10x_ictl>; - interrupts = <6 1>; + interrupts = <6 8>; interrupt-names = "macirq"; clocks = <&ahb_clk>; clock-names = "stmmaceth"; @@ -109,7 +105,7 @@ compatible = "snps,dma-spear1340"; reg = <0xFE000000 0x400>; interrupt-parent = <&tb10x_ictl>; - interrupts = <14 1>; + interrupts = <14 8>; dma-channels = <6>; dma-requests = <0>; dma-masters = <1>; @@ -128,7 +124,7 @@ compatible = "snps,designware-i2c"; reg = <0xFF120000 0x1000>; interrupt-parent = <&tb10x_ictl>; - interrupts = <12 1>; + interrupts = <12 8>; clocks = <&ahb_clk>; }; i2c1: i2c@FF121000 { @@ -137,7 +133,7 @@ compatible = "snps,designware-i2c"; reg = <0xFF121000 0x1000>; interrupt-parent = <&tb10x_ictl>; - interrupts = <12 1>; + interrupts = <12 8>; clocks = <&ahb_clk>; }; i2c2: i2c@FF122000 { @@ -146,7 +142,7 @@ compatible = "snps,designware-i2c"; reg = <0xFF122000 0x1000>; interrupt-parent = <&tb10x_ictl>; - interrupts = <12 1>; + interrupts = <12 8>; clocks = <&ahb_clk>; }; i2c3: i2c@FF123000 { @@ -155,7 +151,7 @@ compatible = "snps,designware-i2c"; reg = <0xFF123000 0x1000>; interrupt-parent = <&tb10x_ictl>; - interrupts = <12 1>; + interrupts = <12 8>; clocks = <&ahb_clk>; }; i2c4: i2c@FF124000 { @@ -164,7 +160,7 @@ compatible = "snps,designware-i2c"; reg = <0xFF124000 0x1000>; interrupt-parent = <&tb10x_ictl>; - interrupts = <12 1>; + interrupts = <12 8>; clocks = <&ahb_clk>; }; @@ -176,7 +172,7 @@ num-cs = <1>; reg = <0xFE010000 0x20>; interrupt-parent = <&tb10x_ictl>; - interrupts = <26 1>; + interrupts = <26 8>; clocks = <&ahb_clk>; }; spi1: spi@0xFE011000 { @@ -187,7 +183,7 @@ num-cs = <2>; reg = 
<0xFE011000 0x20>; interrupt-parent = <&tb10x_ictl>; - interrupts = <10 1>; + interrupts = <10 8>; clocks = <&ahb_clk>; }; @@ -195,7 +191,7 @@ compatible = "abilis,tb100-tsm"; reg = <0xff316000 0x400>; interrupt-parent = <&tb10x_ictl>; - interrupts = <17 1>; + interrupts = <17 8>; output-clkdiv = <4>; global-packet-delay = <0x21>; port-packet-delay = <0>; @@ -213,7 +209,7 @@ "cpuctrl", "a6it_int_force"; interrupt-parent = <&tb10x_ictl>; - interrupts = <20 1>, <19 1>; + interrupts = <20 2>, <19 2>; interrupt-names = "cmd_irq", "event_irq"; }; tb10x_mdsc0: tb10x-mdscr@FF300000 { @@ -239,7 +235,7 @@ compatible = "abilis,tb100-wfb"; reg = <0xff319000 0x1000>; interrupt-parent = <&tb10x_ictl>; - interrupts = <16 1>; + interrupts = <16 8>; }; }; }; diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts index bae4f936cb03..4fb2d6f655bd 100644 --- a/arch/arc/boot/dts/angel4.dts +++ b/arch/arc/boot/dts/angel4.dts @@ -51,5 +51,21 @@ current-speed = <115200>; status = "okay"; }; + + ethernet@c0fc2000 { + compatible = "snps,arc-emac"; + reg = <0xc0fc2000 0x3c>; + interrupts = <6>; + mac-address = [ 00 11 22 33 44 55 ]; + clock-frequency = <80000000>; + max-speed = <100>; + phy = <&phy0>; + + #address-cells = <1>; + #size-cells = <0>; + phy0: ethernet-phy@0 { + reg = <1>; + }; + }; }; }; diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig index c109af320274..4ca50f1f8d05 100644 --- a/arch/arc/configs/fpga_defconfig +++ b/arch/arc/configs/fpga_defconfig @@ -38,6 +38,9 @@ CONFIG_INET=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_BLK_DEV is not set +CONFIG_NETDEVICES=y +CONFIG_ARC_EMAC=y +CONFIG_LXT_PHY=y # CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 318164cabdfc..0fd1f0d515ff 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -207,8 +207,10 @@ out_of_memory: } up_read(&mm->mmap_sem); - if (user_mode(regs)) - do_group_exit(SIGKILL); /* This will never return */ + if (user_mode(regs)) { + pagefault_out_of_memory(); + return; + } goto no_context; diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 4a177365b2c4..a08ce7185423 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -74,7 +74,7 @@ void __init setup_arch_memory(void) /* Last usable page of low mem (no HIGHMEM yet for ARC port) */ max_low_pfn = max_pfn = PFN_DOWN(end_mem); - max_mapnr = num_physpages = max_low_pfn - min_low_pfn; + max_mapnr = max_low_pfn - min_low_pfn; /*------------- reserve kernel image -----------------------*/ memblock_reserve(CONFIG_LINUX_LINK_BASE, @@ -84,7 +84,7 @@ void __init setup_arch_memory(void) /*-------------- node setup --------------------------------*/ memset(zones_size, 0, sizeof(zones_size)); - zones_size[ZONE_NORMAL] = num_physpages; + zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; /* * We can't use the helper free_area_init(zones[]) because it uses @@ -106,39 +106,9 @@ void __init setup_arch_memory(void) */ void __init mem_init(void) { - int codesize, datasize, initsize, reserved_pages, free_pages; - int tmp; - high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz); - - totalram_pages = free_all_bootmem(); - - /* count all reserved pages [kernel code/data/mem_map..] 
*/ - reserved_pages = 0; - for (tmp = 0; tmp < max_mapnr; tmp++) - if (PageReserved(mem_map + tmp)) - reserved_pages++; - - /* XXX: nr_free_pages() is equivalent */ - free_pages = max_mapnr - reserved_pages; - - /* - * For the purpose of display below, split the "reserve mem" - * kernel code/data is already shown explicitly, - * Show any other reservations (mem_map[ ] et al) - */ - reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >> - PAGE_SHIFT); - - codesize = _etext - _text; - datasize = _end - _etext; - initsize = __init_end - __init_begin; - - pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n", - PAGES_TO_MB(free_pages), - TO_MB(arc_mem_sz), - TO_KB(codesize), TO_KB(datasize), TO_KB(initsize), - PAGES_TO_KB(reserved_pages)); + free_all_bootmem(); + mem_init_print_info(NULL); } /* @@ -146,13 +116,13 @@ void __init mem_init(void) */ void __init_refok free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h index 41e335670f60..6adbc53c3a5b 100644 --- a/arch/arc/plat-arcfpga/include/plat/irq.h +++ b/arch/arc/plat-arcfpga/include/plat/irq.h @@ -16,8 +16,6 @@ #define UART1_IRQ 10 #define UART2_IRQ 11 -#define VMAC_IRQ 6 - #define IDE_IRQ 13 #define PCI_IRQ 14 #define PS2_IRQ 15 diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h index 1663f3388085..5c78e6135a1f 100644 --- a/arch/arc/plat-arcfpga/include/plat/memmap.h +++ b/arch/arc/plat-arcfpga/include/plat/memmap.h @@ -15,8 +15,6 @@ #define UART0_BASE 0xC0FC1000 #define UART1_BASE 0xC0FC1100 -#define VMAC_REG_BASEADDR 0xC0FC2000 - #define IDE_CONTROLLER_BASE 0xC0FC9000 #define AHB_PCI_HOST_BRG_BASE 0xC0FD0000 diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig index 1d3452100f1f..1ab386bb5da8 100644 --- a/arch/arc/plat-tb10x/Kconfig +++ b/arch/arc/plat-tb10x/Kconfig @@ -22,6 +22,7 @@ menuconfig ARC_PLAT_TB10X select PINCTRL select PINMUX select ARCH_REQUIRE_GPIOLIB + select TB10X_IRQC help Support for platforms based on the TB10x home media gateway SOC by Abilis Systems. TB10x is based on the ARC700 CPU architecture. 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 531cdda016f9..ba412e02ec0c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -14,6 +14,7 @@ config ARM select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP + select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_IDLE_POLL_SETUP select GENERIC_STRNCPY_FROM_USER @@ -40,6 +41,7 @@ config ARM select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZ4 select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO select HAVE_KERNEL_XZ @@ -1314,7 +1316,7 @@ config ARM_ERRATA_754327 config ARM_ERRATA_364296 bool "ARM errata: Possible cache data corruption with hit-under-miss enabled" - depends on CPU_V6 && !SMP + depends on CPU_V6 help This options enables the workaround for the 364296 ARM1136 r0p2 erratum (possible cache data corruption with diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 5b7be8d975b5..e401a766c0bd 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -510,6 +510,16 @@ choice Say Y here if you want the debug print routines to direct their output to the uart1 port on SiRFmarco devices. + config DEBUG_STI_UART + depends on ARCH_STI + bool "Use StiH415/416 ASC for low-level debug" + help + Say Y here if you want kernel low-level debugging support + on StiH415/416 based platforms like B2000, B2020. + It support UART2 and SBC_UART1. + + If unsure, say N. + config DEBUG_U300_UART bool "Kernel low-level debugging messages via U300 UART0" depends on ARCH_U300 @@ -564,16 +574,6 @@ choice This option selects UART0 on VIA/Wondermedia System-on-a-chip devices, including VT8500, WM8505, WM8650 and WM8850. - config DEBUG_STI_UART - depends on ARCH_STI - bool "Use StiH415/416 ASC for low-level debug" - help - Say Y here if you want kernel low-level debugging support - on StiH415/416 based platforms like B2000, B2020. - It support UART2 and SBC_UART1. - - If unsure, say N. 
- config DEBUG_LL_UART_NONE bool "No low-level debugging UART" depends on !ARCH_MULTIPLATFORM diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index f79a08efe000..47279aa96a6a 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -6,6 +6,7 @@ piggy.gzip piggy.lzo piggy.lzma piggy.xzkern +piggy.lz4 vmlinux vmlinux.lds diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 48d0a44270bd..7ac1610252ba 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -91,6 +91,7 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip suffix_$(CONFIG_KERNEL_LZO) = lzo suffix_$(CONFIG_KERNEL_LZMA) = lzma suffix_$(CONFIG_KERNEL_XZ) = xzkern +suffix_$(CONFIG_KERNEL_LZ4) = lz4 # Borrowed libfdt files for the ATAG compatibility mode @@ -115,7 +116,7 @@ targets := vmlinux vmlinux.lds \ font.o font.c head.o misc.o $(OBJS) # Make sure files are removed during clean -extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ +extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \ lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ hyp-stub.S diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index 24b0475cb8bf..bd245d34952d 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -51,6 +51,10 @@ extern char * strstr(const char * s1, const char *s2); #include "../../../../lib/decompress_unxz.c" #endif +#ifdef CONFIG_KERNEL_LZ4 +#include "../../../../lib/decompress_unlz4.c" +#endif + int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) { return decompress(input, len, NULL, NULL, output, NULL, error); diff --git a/arch/arm/boot/compressed/piggy.lz4.S b/arch/arm/boot/compressed/piggy.lz4.S new file mode 100644 index 000000000000..3d9a575618a3 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.lz4.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.lz4" + .globl input_data_end +input_data_end: diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts index 04feaf8f1420..444b4ede0d60 100644 --- a/arch/arm/boot/dts/am335x-bone.dts +++ b/arch/arm/boot/dts/am335x-bone.dts @@ -214,10 +214,12 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; + phy-mode = "mii"; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; + phy-mode = "mii"; }; &mac { diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index a16bb9691cc6..3aee1a43782d 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -467,8 +467,24 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; + phy-mode = "rgmii-txid"; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; + phy-mode = "rgmii-txid"; +}; + +&tscadc { + status = "okay"; + tsc { + ti,wires = <4>; + ti,x-plate-resistance = <200>; + ti,coordiante-readouts = <5>; + ti,wire-config = <0x00 0x11 0x22 0x33>; + }; + + adc { + ti,adc-channels = <4 5 6 7>; + }; }; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 9e00eef9b74b..0c8ad173d2b0 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -392,3 +392,13 @@ pinctrl-0 = <&davinci_mdio_default>; pinctrl-1 = <&davinci_mdio_sleep>; }; + +&cpsw_emac0 { + phy_id = <&davinci_mdio>, <0>; + phy-mode = "rgmii-txid"; +}; + +&cpsw_emac1 { + phy_id = <&davinci_mdio>, <1>; + phy-mode = "rgmii-txid"; +}; diff --git 
a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 0d4df90477f7..38b446ba1ce1 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -502,6 +502,23 @@ status = "disabled"; }; + tscadc: tscadc@44e0d000 { + compatible = "ti,am3359-tscadc"; + reg = <0x44e0d000 0x1000>; + interrupt-parent = <&intc>; + interrupts = <16>; + ti,hwmods = "adc_tsc"; + status = "disabled"; + + tsc { + compatible = "ti,am3359-tsc"; + }; + am335x_adc: adc { + #io-channel-cells = <1>; + compatible = "ti,am3359-adc"; + }; + }; + gpmc: gpmc@50000000 { compatible = "ti,am3352-gpmc"; ti,hwmods = "gpmc"; diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi index 7d1a27949c13..9866cd736dee 100644 --- a/arch/arm/boot/dts/atlas6.dtsi +++ b/arch/arm/boot/dts/atlas6.dtsi @@ -613,7 +613,7 @@ }; rtc-iobg { - compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; + compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; #address-cells = <1>; #size-cells = <1>; reg = <0x80030000 0x10000>; diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi index 3f0239ec1bc5..dc259e8b8a73 100644 --- a/arch/arm/boot/dts/cros5250-common.dtsi +++ b/arch/arm/boot/dts/cros5250-common.dtsi @@ -190,7 +190,7 @@ samsung,i2c-max-bus-freq = <66000>; hdmiddc@50 { - compatible = "samsung,exynos5-hdmiddc"; + compatible = "samsung,exynos4210-hdmiddc"; reg = <0x50>; }; }; @@ -224,7 +224,7 @@ samsung,i2c-max-bus-freq = <378000>; hdmiphy@38 { - compatible = "samsung,exynos5-hdmiphy"; + compatible = "samsung,exynos4212-hdmiphy"; reg = <0x38>; }; }; diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi index d61b535f682a..e8559b753c9d 100644 --- a/arch/arm/boot/dts/ecx-common.dtsi +++ b/arch/arm/boot/dts/ecx-common.dtsi @@ -33,6 +33,8 @@ calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1 &combophy0 2 &combophy0 3>; + calxeda,sgpio-gpio =<&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>; + calxeda,led-order = <4 0 1 2 3>; }; sdhci@ffe0e000 { diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 35a66dee4011..49f18c24a576 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -105,7 +105,7 @@ samsung,i2c-max-bus-freq = <66000>; hdmiddc@50 { - compatible = "samsung,exynos5-hdmiddc"; + compatible = "samsung,exynos4210-hdmiddc"; reg = <0x50>; }; }; @@ -135,7 +135,7 @@ samsung,i2c-max-bus-freq = <66000>; hdmiphy@38 { - compatible = "samsung,exynos5-hdmiphy"; + compatible = "samsung,exynos4212-hdmiphy"; reg = <0x38>; }; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 41cd625b6020..ef57277fc38f 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -599,7 +599,7 @@ }; hdmi { - compatible = "samsung,exynos5-hdmi"; + compatible = "samsung,exynos4212-hdmi"; reg = <0x14530000 0x70000>; interrupts = <0 95 0>; clocks = <&clock 333>, <&clock 136>, <&clock 137>, @@ -609,7 +609,7 @@ }; mixer { - compatible = "samsung,exynos5-mixer"; + compatible = "samsung,exynos5250-mixer"; reg = <0x14450000 0x10000>; interrupts = <0 94 0>; }; diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 3637bf3b1d59..1f0d38d7b16f 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -155,12 +155,14 @@ can0: can@80032000 { pinctrl-names = "default"; pinctrl-0 = <&can0_pins_a>; + xceiver-supply = <®_can_3v3>; 
status = "okay"; }; can1: can@80034000 { pinctrl-names = "default"; pinctrl-0 = <&can1_pins_a>; + xceiver-supply = <®_can_3v3>; status = "okay"; }; }; @@ -319,6 +321,16 @@ gpio = <&gpio3 30 0>; enable-active-high; }; + + reg_can_3v3: can-3v3 { + compatible = "regulator-fixed"; + regulator-name = "can-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 13 0>; + enable-active-high; + }; + }; sound { diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 195451bf7706..6a8acb01b1d3 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -736,7 +736,7 @@ dcp@80028000 { reg = <0x80028000 0x2000>; interrupts = <52 53 54>; - status = "disabled"; + compatible = "fsl-dcp"; }; pxp@8002a000 { diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts index af32eff9f4b7..7ef282795dd4 100644 --- a/arch/arm/boot/dts/omap3-devkit8000.dts +++ b/arch/arm/boot/dts/omap3-devkit8000.dts @@ -124,20 +124,21 @@ reg = <0 0 0>; /* CS0, offset 0 */ nand-bus-width = <16>; - gpmc,sync-clk = <0>; - gpmc,cs-on = <0>; - gpmc,cs-rd-off = <44>; - gpmc,cs-wr-off = <44>; - gpmc,adv-on = <6>; - gpmc,adv-rd-off = <34>; - gpmc,adv-wr-off = <44>; - gpmc,we-off = <40>; - gpmc,oe-off = <54>; - gpmc,access = <64>; - gpmc,rd-cycle = <82>; - gpmc,wr-cycle = <82>; - gpmc,wr-access = <40>; - gpmc,wr-data-mux-bus = <0>; + gpmc,device-nand; + gpmc,sync-clki-ps = <0>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <44>; + gpmc,cs-wr-off-ns = <44>; + gpmc,adv-on-ns = <6>; + gpmc,adv-rd-off-ns = <34>; + gpmc,adv-wr-off-ns = <44>; + gpmc,we-off-ns = <40>; + gpmc,oe-off-ns = <54>; + gpmc,access-ns = <64>; + gpmc,rd-cycle-ns = <82>; + gpmc,wr-cycle-ns = <82>; + gpmc,wr-access-ns = <40>; + gpmc,wr-data-mux-bus-ns = <0>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 02edd8965f8a..05e9489cf95c 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi @@ -610,7 +610,7 @@ }; rtc-iobg { - compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; + compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; #address-cells = <1>; #size-cells = <1>; reg = <0x80030000 0x10000>; diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts index 0e22a285dfe0..757c4cd900ee 100644 --- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts +++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts @@ -27,6 +27,21 @@ }; soc@01c20000 { + emac: ethernet@01c0b000 { + pinctrl-names = "default"; + pinctrl-0 = <&emac_pins_a>; + phy = <&phy1>; + status = "okay"; + }; + + mdio@01c0b080 { + status = "okay"; + + phy1: ethernet-phy@1 { + reg = <1>; + }; + }; + pinctrl@01c20800 { led_pins_cubieboard: led_pins@0 { allwinner,pins = "PH20", "PH21"; diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts index b9efac100c85..3514b37d66bc 100644 --- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts +++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts @@ -23,10 +23,51 @@ }; soc@01c20000 { + emac: ethernet@01c0b000 { + pinctrl-names = "default"; + pinctrl-0 = <&emac_pins_a>; + phy = <&phy0>; + status = "okay"; + }; + + mdio@01c0b080 { + phy-supply = <®_emac_3v3>; + status = "okay"; + + phy0: ethernet-phy@0 { + reg = <0>; + }; + }; + + pio: pinctrl@01c20800 { + pinctrl-names = "default"; + pinctrl-0 = <&hackberry_hogs>; + + hackberry_hogs: hogs@0 { + allwinner,pins = "PH19"; + 
allwinner,function = "gpio_out"; + allwinner,drive = <0>; + allwinner,pull = <0>; + }; + }; + uart0: serial@01c28000 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; status = "okay"; }; }; + + regulators { + compatible = "simple-bus"; + + reg_emac_3v3: emac-3v3 { + compatible = "regulator-fixed"; + regulator-name = "emac-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + enable-active-high; + gpio = <&pio 7 19 0>; + }; + }; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 82e03d22f913..b2bd6e124250 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -167,6 +167,22 @@ reg = <0x01c20000 0x300000>; ranges; + emac: ethernet@01c0b000 { + compatible = "allwinner,sun4i-emac"; + reg = <0x01c0b000 0x1000>; + interrupts = <55>; + clocks = <&ahb_gates 17>; + status = "disabled"; + }; + + mdio@01c0b080 { + compatible = "allwinner,sun4i-mdio"; + reg = <0x01c0b080 0x14>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + intc: interrupt-controller@01c20400 { compatible = "allwinner,sun4i-ic"; reg = <0x01c20400 0x400>; @@ -226,6 +242,17 @@ allwinner,drive = <0>; allwinner,pull = <0>; }; + + emac_pins_a: emac0@0 { + allwinner,pins = "PA0", "PA1", "PA2", + "PA3", "PA4", "PA5", "PA6", + "PA7", "PA8", "PA9", "PA10", + "PA11", "PA12", "PA13", "PA14", + "PA15", "PA16"; + allwinner,function = "emac"; + allwinner,drive = <0>; + allwinner,pull = <0>; + }; }; timer@01c20c00 { diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index ab177b406b78..365760b33a26 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts @@ -828,6 +828,7 @@ regulator-name = "vdd_vbus_wup1"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&gpio 24 0>; /* PD0 */ }; }; diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index 170159910455..ed4b901b0227 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -410,6 +410,7 @@ regulator-name = "usb1_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&gpio 170 0>; /* PV2 */ }; }; diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index ea078ab8edeb..ab67c94db280 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts @@ -586,6 +586,7 @@ regulator-name = "vbus1"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ }; @@ -595,6 +596,7 @@ regulator-name = "vbus3"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ }; }; diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c index ddc740769601..023ee63827a2 100644 --- a/arch/arm/common/timer-sp.c +++ b/arch/arm/common/timer-sp.c @@ -28,8 +28,8 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/hardware/arm_timer.h> #include <asm/hardware/timer-sp.h> diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig index e3bf2d65618e..65edf6d47215 100644 --- a/arch/arm/configs/bcm_defconfig +++ b/arch/arm/configs/bcm_defconfig @@ -78,6 +78,13 @@ 
CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_USB_SUPPORT is not set +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_TEST=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_BCM_KONA=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 340d550c12b0..fe0bdc361d2c 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -1,88 +1,167 @@ -CONFIG_EXPERIMENTAL=y +CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_ARCH_MVEBU=y CONFIG_MACH_ARMADA_370=y -CONFIG_ARCH_SIRF=y CONFIG_MACH_ARMADA_XP=y +CONFIG_ARCH_BCM=y +CONFIG_GPIO_PCA953X=y CONFIG_ARCH_HIGHBANK=y +CONFIG_ARCH_KEYSTONE=y +CONFIG_ARCH_MXC=y +CONFIG_MACH_IMX51_DT=y +CONFIG_SOC_IMX53=y +CONFIG_SOC_IMX6Q=y +CONFIG_SOC_IMX6SL=y +CONFIG_SOC_VF610=y +CONFIG_ARCH_OMAP3=y +CONFIG_ARCH_OMAP4=y +CONFIG_SOC_OMAP5=y +CONFIG_SOC_AM33XX=y +CONFIG_SOC_AM43XX=y +CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SOCFPGA=y -CONFIG_ARCH_SUNXI=y -CONFIG_ARCH_WM8850=y -# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set -CONFIG_ARCH_ZYNQ=y -CONFIG_ARM_ERRATA_754322=y CONFIG_PLAT_SPEAR=y CONFIG_ARCH_SPEAR13XX=y CONFIG_MACH_SPEAR1310=y CONFIG_MACH_SPEAR1340=y +CONFIG_ARCH_STI=y +CONFIG_ARCH_SUNXI=y +CONFIG_ARCH_SIRF=y +CONFIG_ARCH_TEGRA=y +CONFIG_ARCH_TEGRA_2x_SOC=y +CONFIG_ARCH_TEGRA_3x_SOC=y +CONFIG_ARCH_TEGRA_114_SOC=y +CONFIG_TEGRA_PCI=y +CONFIG_TEGRA_EMC_SCALING_ENABLE=y +CONFIG_ARCH_U8500=y +CONFIG_MACH_SNOWBALL=y +CONFIG_MACH_UX500_DT=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_VEXPRESS_CA9X4=y +CONFIG_ARCH_VIRT=y +CONFIG_ARCH_WM8850=y +CONFIG_ARCH_ZYNQ=y CONFIG_SMP=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_AEABI=y -CONFIG_HIGHMEM=y CONFIG_HIGHPTE=y CONFIG_ARM_APPENDED_DTB=y -CONFIG_VFP=y -CONFIG_NEON=y CONFIG_NET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_SD=y CONFIG_ATA=y +CONFIG_SATA_AHCI_PLATFORM=y CONFIG_SATA_HIGHBANK=y CONFIG_SATA_MV=y -CONFIG_SATA_AHCI_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_SUN4I_EMAC=y CONFIG_NET_CALXEDA_XGMAC=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y -CONFIG_SERIO_AMBAKMI=y CONFIG_MDIO_SUN4I=y +CONFIG_KEYBOARD_SPEAR=y +CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y -CONFIG_KEYBOARD_SPEAR=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_SIRFSOC=y CONFIG_SERIAL_SIRFSOC_CONSOLE=y +CONFIG_SERIAL_TEGRA=y +CONFIG_SERIAL_IMX=y +CONFIG_SERIAL_IMX_CONSOLE=y CONFIG_SERIAL_VT8500=y CONFIG_SERIAL_VT8500_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y -CONFIG_IPMI_HANDLER=y -CONFIG_IPMI_SI=y -CONFIG_I2C=y +CONFIG_SERIAL_FSL_LPUART=y +CONFIG_SERIAL_FSL_LPUART_CONSOLE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_SIRF=y +CONFIG_I2C_TEGRA=y CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_SPI_SIRF=y -CONFIG_GPIO_PL061=y -CONFIG_FB=y +CONFIG_SPI_TEGRA114=y +CONFIG_SPI_TEGRA20_SLINK=y +CONFIG_PINCTRL_SINGLE=y +CONFIG_GPIO_GENERIC_PLATFORM=y +CONFIG_GPIO_TWL4030=y +CONFIG_REGULATOR_GPIO=y +CONFIG_REGULATOR_AB8500=y +CONFIG_REGULATOR_TPS51632=y +CONFIG_REGULATOR_TPS62360=y +CONFIG_REGULATOR_TWL4030=y +CONFIG_REGULATOR_VEXPRESS=y +CONFIG_DRM=y +CONFIG_TEGRA_HOST1X=y +CONFIG_DRM_TEGRA=y 
CONFIG_FB_ARMCLCD=y CONFIG_FB_WM8505=y -CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FB_SIMPLE=y CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_MXC=y +CONFIG_USB_EHCI_TEGRA=y +CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_ISP1760_HCD=y CONFIG_USB_STORAGE=y +CONFIG_AB8500_USB=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_OMAP_USB2=y +CONFIG_OMAP_USB3=y +CONFIG_SAMSUNG_USB2PHY=y +CONFIG_SAMSUNG_USB3PHY=y +CONFIG_USB_GPIO_VBUS=y +CONFIG_USB_ISP1301=y +CONFIG_USB_MXS_PHY=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_TEGRA=y CONFIG_MMC_SDHCI_SPEAR=y -CONFIG_MMC_WMT=y +CONFIG_MMC_OMAP=y +CONFIG_MMC_OMAP_HS=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_HIGHBANK_MC=y CONFIG_EDAC_HIGHBANK_L2=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_VT8500=y -CONFIG_PWM=y -CONFIG_PWM_VT8500=y +CONFIG_RTC_DRV_TEGRA=y CONFIG_DMADEVICES=y -CONFIG_PL330_DMA=y -CONFIG_SIRF_DMA=y CONFIG_DW_DMAC=y +CONFIG_TEGRA20_APB_DMA=y +CONFIG_STE_DMA40=y +CONFIG_SIRF_DMA=y +CONFIG_TI_EDMA=y +CONFIG_PL330_DMA=y +CONFIG_IMX_SDMA=y +CONFIG_IMX_DMA=y +CONFIG_MXS_DMA=y +CONFIG_DMA_OMAP=y +CONFIG_PWM=y +CONFIG_PWM_VT8500=y +CONFIG_EXT4_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_ROOT_NFS=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_LOCKUP_DETECTOR=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 2ac0ffb12f03..5339e6a4d639 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -22,6 +22,10 @@ CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_MULTI_V6=y CONFIG_ARCH_OMAP2PLUS=y +CONFIG_ARCH_OMAP2=y +CONFIG_ARCH_OMAP3=y +CONFIG_ARCH_OMAP4=y +CONFIG_SOC_AM33XX=y CONFIG_OMAP_RESET_CLOCKS=y CONFIG_OMAP_MUX_DEBUG=y CONFIG_ARCH_VEXPRESS_CA9X4=y @@ -34,6 +38,8 @@ CONFIG_NR_CPUS=2 CONFIG_LEDS=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_ARM_APPENDED_DTB=y +CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" CONFIG_KEXEC=y CONFIG_FPE_NWFPE=y @@ -152,6 +158,13 @@ CONFIG_W1=y CONFIG_POWER_SUPPLY=y CONFIG_SENSORS_LM75=m CONFIG_WATCHDOG=y +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y CONFIG_OMAP_WATCHDOG=y CONFIG_TWL4030_WATCHDOG=y CONFIG_MFD_TPS65217=y @@ -238,7 +251,13 @@ CONFIG_RTC_DRV_TWL92330=y CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_OMAP=y CONFIG_DMADEVICES=y +CONFIG_TI_EDMA=y CONFIG_DMA_OMAP=y +CONFIG_TI_SOC_THERMAL=y +CONFIG_TI_THERMAL=y +CONFIG_OMAP4_THERMAL=y +CONFIG_OMAP5_THERMAL=y +CONFIG_DRA752_THERMAL=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set @@ -286,3 +305,4 @@ CONFIG_SOC_OMAP5=y CONFIG_TI_DAVINCI_MDIO=y CONFIG_TI_DAVINCI_CPDMA=y CONFIG_TI_CPSW=y +CONFIG_AT803X_PHY=y diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig index 1fdb82694ca2..82eaa552ed14 100644 --- a/arch/arm/configs/spear13xx_defconfig +++ b/arch/arm/configs/spear13xx_defconfig @@ -61,7 +61,6 @@ CONFIG_GPIO_SYSFS=y CONFIG_GPIO_PL061=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y -CONFIG_MPCORE_WATCHDOG=y # CONFIG_HID_SUPPORT is not set CONFIG_USB=y # CONFIG_USB_DEVICE_CLASS is not set diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index c037aa1065b7..a0025dc13021 100644 --- 
a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -1,6 +1,8 @@ -CONFIG_EXPERIMENTAL=y +CONFIG_HIGHMEM=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y CONFIG_MODULES=y @@ -9,10 +11,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_ARCH_U8500=y CONFIG_MACH_HREFV60=y CONFIG_MACH_SNOWBALL=y -CONFIG_MACH_U5500=y CONFIG_MACH_UX500_DT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y @@ -20,6 +19,7 @@ CONFIG_AEABI=y CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8" CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_IDLE=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_PM_RUNTIME=y @@ -36,7 +36,6 @@ CONFIG_CAIF=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_AB8500_PWM=y CONFIG_SENSORS_BH1780=y CONFIG_NETDEVICES=y CONFIG_SMSC911X=y @@ -60,35 +59,39 @@ CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_NOMADIK=y CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_GPIO_STMPE=y CONFIG_GPIO_TC3589X=y -# CONFIG_POWER_SUPPLY is not set -# CONFIG_AB8500_BM is not set -# CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL is not set CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y +CONFIG_WATCHDOG=y CONFIG_MFD_STMPE=y CONFIG_MFD_TC3589X=y -CONFIG_AB5500_CORE=y -CONFIG_AB8500_CORE=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_AB8500=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y -# CONFIG_HID_SUPPORT is not set -CONFIG_USB_GADGET=y +CONFIG_REGULATOR_AB8500=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_SND_SOC_UX500=y +CONFIG_SND_SOC_UX500_MACH_MOP500=y +CONFIG_USB=y +CONFIG_USB_MUSB_HDRC=y +CONFIG_USB_MUSB_UX500=y +CONFIG_USB_PHY=y CONFIG_AB8500_USB=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_MUSB_HDRC=y +CONFIG_USB_ETH=m CONFIG_MMC=y -CONFIG_MMC_CLKGATE=y +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_LM3530=y -CONFIG_LEDS_LP5521=y CONFIG_LEDS_GPIO=y +CONFIG_LEDS_LP5521=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y @@ -108,7 +111,6 @@ CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_CONFIGFS_FS=m # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y @@ -122,3 +124,7 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y # CONFIG_FTRACE is not set CONFIG_DEBUG_USER=y +CONFIG_CRYPTO_DEV_UX500=y +CONFIG_CRYPTO_DEV_UX500_CRYP=y +CONFIG_CRYPTO_DEV_UX500_HASH=y +CONFIG_CRYPTO_DEV_UX500_DEBUG=y diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index ed94b1a366ae..423744bf18eb 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -223,11 +223,12 @@ extern int iop3xx_get_init_atu(void); #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <linux/reboot.h> void iop3xx_map_io(void); void iop_init_cp6_handler(void); void iop_init_time(unsigned long tickrate); -void iop3xx_restart(char, const char *); +void iop3xx_restart(enum reboot_mode, const char *); static inline u32 read_tmr0(void) { diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 75bf07910b81..441efc491b50 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -11,6 +11,7 @@ #include <linux/types.h> #ifndef __ASSEMBLY__ +#include <linux/reboot.h> struct tag; struct meminfo; @@ -43,7 
+44,7 @@ struct machine_desc { unsigned char reserve_lp0 :1; /* never has lp0 */ unsigned char reserve_lp1 :1; /* never has lp1 */ unsigned char reserve_lp2 :1; /* never has lp2 */ - char restart_mode; /* default restart mode */ + enum reboot_mode reboot_mode; /* default restart mode */ struct smp_operations *smp; /* SMP operations */ bool (*smp_init)(void); void (*fixup)(struct tag *, char **, @@ -58,7 +59,7 @@ struct machine_desc { #ifdef CONFIG_MULTI_IRQ_HANDLER void (*handle_irq)(struct pt_regs *); #endif - void (*restart)(char, const char *); + void (*restart)(enum reboot_mode, const char *); }; /* diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 584786f740f9..e750a938fd3c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -276,12 +276,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) /* * Conversion between a struct page and a physical address. * - * Note: when converting an unknown physical address to a - * struct page, the resulting pointer must be validated - * using VALID_PAGE(). It must return an invalid struct page - * for any physical address not corresponding to a system - * RAM address. - * * page_to_pfn(page) convert a struct page * to a PFN number * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * * diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index 3d520ddca61b..2389b71a8e7c 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -1,16 +1,4 @@ -/* - * sched_clock.h: support for extending counters to full 64-bit ns counter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. +/* You shouldn't include this file. Use linux/sched_clock.h instead. 
+ * Temporary file until all asm/sched_clock.h users are gone */ -#ifndef ASM_SCHED_CLOCK -#define ASM_SCHED_CLOCK - -extern void sched_clock_postinit(void); -extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); - -extern unsigned long long (*sched_clock_func)(void); - -#endif +#include <linux/sched_clock.h> diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 18d169373612..0393fbab8dd5 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -23,10 +23,21 @@ static inline unsigned long scu_a9_get_base(void) return pa; } +#ifdef CONFIG_HAVE_ARM_SCU unsigned int scu_get_core_count(void __iomem *); int scu_power_mode(void __iomem *, unsigned int); +#else +static inline unsigned int scu_get_core_count(void __iomem *scu_base) +{ + return 0; +} +static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode) +{ + return -EINVAL; +} +#endif -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU) void scu_enable(void __iomem *scu_base); #else static inline void scu_enable(void __iomem *scu_base) {} diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 21a23e378bbe..a3d61ad984af 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -6,11 +6,12 @@ #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/irqflags.h> +#include <linux/reboot.h> extern void cpu_init(void); void soft_restart(unsigned long); -extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_idle)(void); #define UDBG_UNDEFINED (1 << 0) diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h index 799f42ecca63..7704e28c3483 100644 --- a/arch/arm/include/asm/xen/hypercall.h +++ b/arch/arm/include/asm/xen/hypercall.h @@ -47,6 +47,7 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg); int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg); int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); +int HYPERVISOR_tmem_op(void *arg); static inline void MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index fccfbdb03df1..86d10dd47dc4 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg # Object file lists. 
obj-y := elf.o entry-common.o irq.o opcodes.o \ - process.o ptrace.o return_address.o sched_clock.o \ + process.o ptrace.o return_address.o \ setup.o signal.o stacktrace.o sys_arm.o time.o traps.o obj-$(CONFIG_ATAGS) += atags_parse.o diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 59dcdced6e30..221f07b11ccb 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -11,9 +11,9 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> +#include <linux/sched_clock.h> #include <asm/delay.h> -#include <asm/sched_clock.h> #include <clocksource/arm_arch_timer.h> diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 5b391a689b47..76ab5ca50610 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -133,6 +133,9 @@ ENTRY(lookup_processor_type) ldmfd sp!, {r4 - r6, r9, pc} ENDPROC(lookup_processor_type) + __FINIT + .text + /* * Read processor ID register (CP#15, CR0), and look up in the linker-built * supported processor list. Note that we can't use the absolute addresses diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 7f1efcd4a6e9..d3ca4f6915af 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -32,6 +32,7 @@ #include <linux/hw_breakpoint.h> #include <linux/cpuidle.h> #include <linux/leds.h> +#include <linux/reboot.h> #include <asm/cacheflush.h> #include <asm/idmap.h> @@ -113,7 +114,7 @@ void soft_restart(unsigned long addr) BUG(); } -static void null_restart(char mode, const char *cmd) +static void null_restart(enum reboot_mode reboot_mode, const char *cmd) { } @@ -123,7 +124,7 @@ static void null_restart(char mode, const char *cmd) void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -void (*arm_pm_restart)(char str, const char *cmd) = null_restart; +void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart; EXPORT_SYMBOL_GPL(arm_pm_restart); /* @@ -175,16 +176,6 @@ void arch_cpu_idle(void) default_idle(); } -static char reboot_mode = 'h'; - -int __init reboot_setup(char *str) -{ - reboot_mode = str[0]; - return 1; -} - -__setup("reboot=", reboot_setup); - /* * Called by kexec, immediately prior to machine_kexec(). * diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 2bc1514d6dbe..0dd3b79b15c3 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -886,20 +886,12 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); - ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); - ptrace_put_breakpoints(child); break; #endif diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c deleted file mode 100644 index e8edcaa0e432..000000000000 --- a/arch/arm/kernel/sched_clock.c +++ /dev/null @@ -1,217 +0,0 @@ -/* - * sched_clock.c: support for extending counters to full 64-bit ns counter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/clocksource.h> -#include <linux/init.h> -#include <linux/jiffies.h> -#include <linux/kernel.h> -#include <linux/moduleparam.h> -#include <linux/sched.h> -#include <linux/syscore_ops.h> -#include <linux/timer.h> - -#include <asm/sched_clock.h> - -struct clock_data { - u64 epoch_ns; - u32 epoch_cyc; - u32 epoch_cyc_copy; - unsigned long rate; - u32 mult; - u32 shift; - bool suspended; - bool needs_suspend; -}; - -static void sched_clock_poll(unsigned long wrap_ticks); -static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); -static int irqtime = -1; - -core_param(irqtime, irqtime, int, 0400); - -static struct clock_data cd = { - .mult = NSEC_PER_SEC / HZ, -}; - -static u32 __read_mostly sched_clock_mask = 0xffffffff; - -static u32 notrace jiffy_sched_clock_read(void) -{ - return (u32)(jiffies - INITIAL_JIFFIES); -} - -static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; - -static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) -{ - return (cyc * mult) >> shift; -} - -static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) -{ - u64 epoch_ns; - u32 epoch_cyc; - - if (cd.suspended) - return cd.epoch_ns; - - /* - * Load the epoch_cyc and epoch_ns atomically. We do this by - * ensuring that we always write epoch_cyc, epoch_ns and - * epoch_cyc_copy in strict order, and read them in strict order. - * If epoch_cyc and epoch_cyc_copy are not equal, then we're in - * the middle of an update, and we should repeat the load. - */ - do { - epoch_cyc = cd.epoch_cyc; - smp_rmb(); - epoch_ns = cd.epoch_ns; - smp_rmb(); - } while (epoch_cyc != cd.epoch_cyc_copy); - - return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift); -} - -/* - * Atomically update the sched_clock epoch. - */ -static void notrace update_sched_clock(void) -{ - unsigned long flags; - u32 cyc; - u64 ns; - - cyc = read_sched_clock(); - ns = cd.epoch_ns + - cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, - cd.mult, cd.shift); - /* - * Write epoch_cyc and epoch_ns in a way that the update is - * detectable in cyc_to_fixed_sched_clock(). - */ - raw_local_irq_save(flags); - cd.epoch_cyc_copy = cyc; - smp_wmb(); - cd.epoch_ns = ns; - smp_wmb(); - cd.epoch_cyc = cyc; - raw_local_irq_restore(flags); -} - -static void sched_clock_poll(unsigned long wrap_ticks) -{ - mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks)); - update_sched_clock(); -} - -void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) -{ - unsigned long r, w; - u64 res, wrap; - char r_unit; - - if (cd.rate > rate) - return; - - BUG_ON(bits > 32); - WARN_ON(!irqs_disabled()); - read_sched_clock = read; - sched_clock_mask = (1 << bits) - 1; - cd.rate = rate; - - /* calculate the mult/shift to convert counter ticks to ns. */ - clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0); - - r = rate; - if (r >= 4000000) { - r /= 1000000; - r_unit = 'M'; - } else if (r >= 1000) { - r /= 1000; - r_unit = 'k'; - } else - r_unit = ' '; - - /* calculate how many ns until we wrap */ - wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift); - do_div(wrap, NSEC_PER_MSEC); - w = wrap; - - /* calculate the ns resolution of this counter */ - res = cyc_to_ns(1ULL, cd.mult, cd.shift); - pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n", - bits, r, r_unit, res, w); - - /* - * Start the timer to keep sched_clock() properly updated and - * sets the initial epoch. 
- */ - sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); - update_sched_clock(); - - /* - * Ensure that sched_clock() starts off at 0ns - */ - cd.epoch_ns = 0; - - /* Enable IRQ time accounting if we have a fast enough sched_clock */ - if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) - enable_sched_clock_irqtime(); - - pr_debug("Registered %pF as sched_clock source\n", read); -} - -static unsigned long long notrace sched_clock_32(void) -{ - u32 cyc = read_sched_clock(); - return cyc_to_sched_clock(cyc, sched_clock_mask); -} - -unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32; - -unsigned long long notrace sched_clock(void) -{ - return sched_clock_func(); -} - -void __init sched_clock_postinit(void) -{ - /* - * If no sched_clock function has been provided at that point, - * make it the final one one. - */ - if (read_sched_clock == jiffy_sched_clock_read) - setup_sched_clock(jiffy_sched_clock_read, 32, HZ); - - sched_clock_poll(sched_clock_timer.data); -} - -static int sched_clock_suspend(void) -{ - sched_clock_poll(sched_clock_timer.data); - cd.suspended = true; - return 0; -} - -static void sched_clock_resume(void) -{ - cd.epoch_cyc = read_sched_clock(); - cd.epoch_cyc_copy = cd.epoch_cyc; - cd.suspended = false; -} - -static struct syscore_ops sched_clock_ops = { - .suspend = sched_clock_suspend, - .resume = sched_clock_resume, -}; - -static int __init sched_clock_syscore_init(void) -{ - register_syscore_ops(&sched_clock_ops); - return 0; -} -device_initcall(sched_clock_syscore_init); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 9b653278c9e8..63af9a7ae512 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -74,7 +74,7 @@ __setup("fpe=", fpe_setup); extern void paging_init(struct machine_desc *desc); extern void sanity_check_meminfo(void); -extern void reboot_setup(char *str); +extern enum reboot_mode reboot_mode; extern void setup_dma_zone(struct machine_desc *desc); unsigned int processor_id; @@ -861,8 +861,8 @@ void __init setup_arch(char **cmdline_p) setup_dma_zone(mdesc); - if (mdesc->restart_mode) - reboot_setup(&mdesc->restart_mode); + if (mdesc->reboot_mode != REBOOT_HARD) + reboot_mode = mdesc->reboot_mode; init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 90525d9d290b..f6fd1d4398c6 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb, * changing cpu. 
*/ if (flags == POST_RATE_CHANGE) - smp_call_function(twd_update_frequency, + on_each_cpu(twd_update_frequency, (void *)&cnd->new_rate, 1); return NOTIFY_OK; diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index abff4e9aaee0..98aee3258398 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -24,9 +24,9 @@ #include <linux/timer.h> #include <linux/clocksource.h> #include <linux/irq.h> +#include <linux/sched_clock.h> #include <asm/thread_info.h> -#include <asm/sched_clock.h> #include <asm/stacktrace.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> @@ -120,6 +120,4 @@ void __init time_init(void) machine_desc->init_time(); else clocksource_of_init(); - - sched_clock_postinit(); } diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 741f66a2edbd..9c697db2787e 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -219,6 +219,10 @@ long kvm_arch_dev_ioctl(struct file *filp, return -EINVAL; } +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 9eb574397ee1..4aad93d54d6f 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -11,6 +11,7 @@ */ #include <linux/module.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/mach/arch.h> @@ -304,7 +305,7 @@ static void at91rm9200_idle(void) at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); } -static void at91rm9200_restart(char mode, const char *cmd) +static void at91rm9200_restart(enum reboot_mode reboot_mode, const char *cmd) { /* * Perform a hardware reset with the use of the Watchdog timer. diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index f6de36aefe85..dc6e2f5f804d 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -10,6 +10,7 @@ #include <linux/clkdev.h> #include <linux/of.h> +#include <linux/reboot.h> /* Map io */ extern void __init at91_map_io(void); @@ -60,8 +61,8 @@ extern void at91sam9_idle(void); /* reset */ extern void at91_ioremap_rstc(u32 base_addr); -extern void at91sam9_alt_restart(char, const char *); -extern void at91sam9g45_restart(char, const char *); +extern void at91sam9_alt_restart(enum reboot_mode, const char *); +extern void at91sam9g45_restart(enum reboot_mode, const char *); /* shutdown */ extern void at91_ioremap_shdwc(u32 base_addr); diff --git a/arch/arm/mach-bcm2835/bcm2835.c b/arch/arm/mach-bcm2835/bcm2835.c index 740fa9ebe249..40686d7ef500 100644 --- a/arch/arm/mach-bcm2835/bcm2835.c +++ b/arch/arm/mach-bcm2835/bcm2835.c @@ -53,7 +53,7 @@ static void bcm2835_setup_restart(void) WARN(!wdt_regs, "failed to remap watchdog regs"); } -static void bcm2835_restart(char mode, const char *cmd) +static void bcm2835_restart(enum reboot_mode mode, const char *cmd) { u32 val; @@ -91,7 +91,7 @@ static void bcm2835_power_off(void) writel_relaxed(val, wdt_regs + PM_RSTS); /* Continue with normal reset mechanism */ - bcm2835_restart(0, ""); + bcm2835_restart(REBOOT_HARD, ""); } static struct map_desc io_map __initdata = { diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c index f6d1746366d4..4ca2f3ca2de4 100644 --- a/arch/arm/mach-clps711x/common.c +++ b/arch/arm/mach-clps711x/common.c @@ -384,7 +384,7 @@ void __init clps711x_timer_init(void) setup_irq(IRQ_TC2OI, &clps711x_timer_irq); } -void clps711x_restart(char mode, const char *cmd) +void 
clps711x_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0); } diff --git a/arch/arm/mach-clps711x/common.h b/arch/arm/mach-clps711x/common.h index 2a22f4c6cc75..9a6767bfdc47 100644 --- a/arch/arm/mach-clps711x/common.h +++ b/arch/arm/mach-clps711x/common.h @@ -4,6 +4,8 @@ * Common bits. */ +#include <linux/reboot.h> + #define CLPS711X_NR_IRQS (33) #define CLPS711X_NR_GPIO (4 * 8 + 3) #define CLPS711X_GPIO(prt, bit) ((prt) * 8 + (bit)) @@ -12,5 +14,5 @@ extern void clps711x_map_io(void); extern void clps711x_init_irq(void); extern void clps711x_timer_init(void); extern void clps711x_handle_irq(struct pt_regs *regs); -extern void clps711x_restart(char mode, const char *cmd); +extern void clps711x_restart(enum reboot_mode mode, const char *cmd); extern void clps711x_init_early(void); diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h index b23b17b4da10..5218b6198dc2 100644 --- a/arch/arm/mach-cns3xxx/core.h +++ b/arch/arm/mach-cns3xxx/core.h @@ -11,6 +11,8 @@ #ifndef __CNS3XXX_CORE_H #define __CNS3XXX_CORE_H +#include <linux/reboot.h> + extern void cns3xxx_timer_init(void); #ifdef CONFIG_CACHE_L2X0 @@ -22,6 +24,6 @@ static inline void cns3xxx_l2x0_init(void) {} void __init cns3xxx_map_io(void); void __init cns3xxx_init_irq(void); void cns3xxx_power_off(void); -void cns3xxx_restart(char, const char *); +void cns3xxx_restart(enum reboot_mode, const char *); #endif /* __CNS3XXX_CORE_H */ diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c index 79e3d47aad65..fb38c726e987 100644 --- a/arch/arm/mach-cns3xxx/pm.c +++ b/arch/arm/mach-cns3xxx/pm.c @@ -89,7 +89,7 @@ void cns3xxx_pwr_soft_rst(unsigned int block) } EXPORT_SYMBOL(cns3xxx_pwr_soft_rst); -void cns3xxx_restart(char mode, const char *cmd) +void cns3xxx_restart(enum reboot_mode mode, const char *cmd) { /* * To reset, we hit the on-board reset register diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index a075b3e0c5c7..e026b19b23ea 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -40,6 +40,7 @@ config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" select ARCH_DAVINCI_DA8XX select ARCH_HAS_CPUFREQ + select CPU_FREQ_TABLE select CP_INTC config ARCH_DAVINCI_DA8XX diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 8a24b6c6339f..bea6793a7ede 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -985,7 +985,6 @@ static struct regulator_init_data tps65070_regulator_data[] = { static struct touchscreen_init_data tps6507x_touchscreen_data = { .poll_period = 30, /* ms between touch samples */ .min_pressure = 0x30, /* minimum pressure to trigger touch */ - .vref = 0, /* turn off vref when not using A/D */ .vendor = 0, /* /sys/class/input/input?/id/vendor */ .product = 65070, /* /sys/class/input/input?/id/product */ .version = 0x100, /* /sys/class/input/input?/id/version */ diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index fd38c8d22e3c..afbc439f11d4 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -509,7 +509,6 @@ struct ths7303_platform_data ths7303_pdata = { .ch_1 = 3, .ch_2 = 3, .ch_3 = 3, - .init_enable = 1, }; static struct amp_config_info vpbe_amp = { diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index 4d6933848abf..a0d4f6038b60 100644 --- a/arch/arm/mach-davinci/da850.c +++ 
b/arch/arm/mach-davinci/da850.c @@ -1004,7 +1004,7 @@ static const struct da850_opp da850_opp_96 = { #define OPP(freq) \ { \ - .index = (unsigned int) &da850_opp_##freq, \ + .driver_data = (unsigned int) &da850_opp_##freq, \ .frequency = freq * 1000, \ } @@ -1016,7 +1016,7 @@ static struct cpufreq_frequency_table da850_freq_table[] = { OPP(200), OPP(96), { - .index = 0, + .driver_data = 0, .frequency = CPUFREQ_TABLE_END, }, }; @@ -1044,7 +1044,7 @@ static int da850_set_voltage(unsigned int index) if (!cvdd) return -ENODEV; - opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; + opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); } @@ -1125,7 +1125,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index) struct pll_data *pll = clk->pll_data; int ret; - opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; + opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; prediv = opp->prediv; mult = opp->mult; postdiv = opp->postdiv; diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index eb254fe861ac..71a46a348761 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -16,6 +16,7 @@ #include <linux/serial_8250.h> #include <linux/ahci_platform.h> #include <linux/clk.h> +#include <linux/reboot.h> #include <mach/cputype.h> #include <mach/common.h> @@ -366,7 +367,7 @@ static struct platform_device da8xx_wdt_device = { .resource = da8xx_watchdog_resources, }; -void da8xx_restart(char mode, const char *cmd) +void da8xx_restart(enum reboot_mode mode, const char *cmd) { struct device *dev; diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c index 90b83d00fe2b..111573c0aad1 100644 --- a/arch/arm/mach-davinci/devices.c +++ b/arch/arm/mach-davinci/devices.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <linux/platform_data/i2c-davinci.h> @@ -307,7 +308,7 @@ struct platform_device davinci_wdt_device = { .resource = wdt_resources, }; -void davinci_restart(char mode, const char *cmd) +void davinci_restart(enum reboot_mode mode, const char *cmd) { davinci_watchdog_reset(&davinci_wdt_device); } diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h index b124b77c90c5..cce316b92c06 100644 --- a/arch/arm/mach-davinci/include/mach/common.h +++ b/arch/arm/mach-davinci/include/mach/common.h @@ -14,6 +14,7 @@ #include <linux/compiler.h> #include <linux/types.h> +#include <linux/reboot.h> extern void davinci_timer_init(void); @@ -81,7 +82,7 @@ extern struct davinci_soc_info davinci_soc_info; extern void davinci_common_init(struct davinci_soc_info *soc_info); extern void davinci_init_ide(void); -void davinci_restart(char mode, const char *cmd); +void davinci_restart(enum reboot_mode mode, const char *cmd); void davinci_init_late(void); #ifdef CONFIG_DAVINCI_RESET_CLOCKS diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h index 3c797e2272f8..7b41a5e9bc31 100644 --- a/arch/arm/mach-davinci/include/mach/da8xx.h +++ b/arch/arm/mach-davinci/include/mach/da8xx.h @@ -17,6 +17,7 @@ #include <linux/davinci_emac.h> #include <linux/spi/spi.h> #include <linux/platform_data/davinci_asp.h> +#include <linux/reboot.h> #include <linux/videodev2.h> #include <mach/serial.h> @@ 
-106,7 +107,7 @@ int da850_register_vpif_display (struct vpif_display_config *display_config); int da850_register_vpif_capture (struct vpif_capture_config *capture_config); -void da8xx_restart(char mode, const char *cmd); +void da8xx_restart(enum reboot_mode mode, const char *cmd); void da8xx_rproc_reserve_cma(void); int da8xx_register_rproc(void); diff --git a/arch/arm/mach-davinci/include/mach/tnetv107x.h b/arch/arm/mach-davinci/include/mach/tnetv107x.h index 366e975effa8..16314c64f755 100644 --- a/arch/arm/mach-davinci/include/mach/tnetv107x.h +++ b/arch/arm/mach-davinci/include/mach/tnetv107x.h @@ -35,6 +35,7 @@ #include <linux/serial_8250.h> #include <linux/input/matrix_keypad.h> #include <linux/mfd/ti_ssp.h> +#include <linux/reboot.h> #include <linux/platform_data/mmc-davinci.h> #include <linux/platform_data/mtd-davinci.h> @@ -54,7 +55,7 @@ extern struct platform_device tnetv107x_serial_device; extern void tnetv107x_init(void); extern void tnetv107x_devices_init(struct tnetv107x_device_info *); extern void tnetv107x_irq_init(void); -void tnetv107x_restart(char mode, const char *cmd); +void tnetv107x_restart(enum reboot_mode mode, const char *cmd); #endif diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index bad361ec1666..7a55b5c95971 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -18,8 +18,8 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-davinci/tnetv107x.c b/arch/arm/mach-davinci/tnetv107x.c index 3b2a70d43efa..4545667ecd3c 100644 --- a/arch/arm/mach-davinci/tnetv107x.c +++ b/arch/arm/mach-davinci/tnetv107x.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <asm/mach/map.h> @@ -730,7 +731,7 @@ static void tnetv107x_watchdog_reset(struct platform_device *pdev) __raw_writel(1, ®s->kick); } -void tnetv107x_restart(char mode, const char *cmd) +void tnetv107x_restart(enum reboot_mode mode, const char *cmd) { tnetv107x_watchdog_reset(&tnetv107x_wdt_device); } diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index 2a9443d04d92..00247c771313 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c @@ -381,7 +381,7 @@ void __init dove_init(void) dove_xor1_init(); } -void dove_restart(char mode, const char *cmd) +void dove_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. 
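Most of the hunks above and below are the mechanical half of one conversion: ARM machine restart hooks stop taking a bare char mode ('s', 'h') and instead take the enum reboot_mode values from <linux/reboot.h>, so REBOOT_SOFT and REBOOT_HARD replace the old character tests. As a rough illustration of the resulting shape — foo_restart, foo_reset_base and FOO_RESET_REG are made-up names for this sketch, not part of this merge — a converted board hook now looks something like this:

/*
 * Illustrative sketch only; the foo_* names and FOO_RESET_REG are
 * hypothetical, standing in for a real SoC's reset register.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/reboot.h>

#include <asm/system_misc.h>	/* soft_restart(), arm_pm_restart */

#define FOO_RESET_REG	0x0	/* hypothetical soft-reset register offset */

static void __iomem *foo_reset_base;	/* ioremapped reset block, set up elsewhere */

static void foo_restart(enum reboot_mode mode, const char *cmd)
{
	if (mode == REBOOT_SOFT)
		soft_restart(0);		/* replaces the old mode == 's' check */
	else
		writel(1, foo_reset_base + FOO_RESET_REG);	/* hard reset via SoC register */
}

static void __init foo_init_machine(void)
{
	/* boards can also set .restart = foo_restart in their machine descriptor */
	arm_pm_restart = foo_restart;
}

The cmd string is passed through unchanged; only the mode argument changes type, which is why almost every hunk in this series is a one-line prototype edit plus a <linux/reboot.h> include.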
diff --git a/arch/arm/mach-dove/common.h b/arch/arm/mach-dove/common.h index e86347928b67..1d725224d146 100644 --- a/arch/arm/mach-dove/common.h +++ b/arch/arm/mach-dove/common.h @@ -11,6 +11,8 @@ #ifndef __ARCH_DOVE_COMMON_H #define __ARCH_DOVE_COMMON_H +#include <linux/reboot.h> + struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -42,6 +44,6 @@ void dove_spi1_init(void); void dove_i2c_init(void); void dove_sdio0_init(void); void dove_sdio1_init(void); -void dove_restart(char, const char *); +void dove_restart(enum reboot_mode, const char *); #endif diff --git a/arch/arm/mach-dove/include/mach/bridge-regs.h b/arch/arm/mach-dove/include/mach/bridge-regs.h index 99f259e8cf33..5362df3df89f 100644 --- a/arch/arm/mach-dove/include/mach/bridge-regs.h +++ b/arch/arm/mach-dove/include/mach/bridge-regs.h @@ -26,6 +26,7 @@ #define SYSTEM_SOFT_RESET (BRIDGE_VIRT_BASE + 0x010c) #define SOFT_RESET 0x00000001 +#define BRIDGE_CAUSE (BRIDGE_VIRT_BASE + 0x0110) #define BRIDGE_INT_TIMER1_CLR (~0x0004) #define IRQ_VIRT_BASE (BRIDGE_VIRT_BASE + 0x0200) diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index 8a53f346cdb3..68ac934d4565 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c @@ -311,7 +311,7 @@ static int __init ebsa110_init(void) arch_initcall(ebsa110_init); -static void ebsa110_restart(char mode, const char *cmd) +static void ebsa110_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0x80000000); } @@ -321,7 +321,6 @@ MACHINE_START(EBSA110, "EBSA110") .atag_offset = 0x400, .reserve_lp0 = 1, .reserve_lp2 = 1, - .restart_mode = 's', .map_io = ebsa110_map_io, .init_early = ebsa110_init_early, .init_irq = ebsa110_init_irq, diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index c49ed3dc1aea..df8612fbbc9c 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -35,6 +35,7 @@ #include <linux/spi/spi.h> #include <linux/export.h> #include <linux/irqchip/arm-vic.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> @@ -921,7 +922,7 @@ void __init ep93xx_init_devices(void) gpio_led_register_device(-1, &ep93xx_led_data); } -void ep93xx_restart(char mode, const char *cmd) +void ep93xx_restart(enum reboot_mode mode, const char *cmd) { /* * Set then clear the SWRST bit to initiate a software reset diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h index a14e1b37beff..e256e0baec2e 100644 --- a/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/arch/arm/mach-ep93xx/include/mach/platform.h @@ -4,6 +4,8 @@ #ifndef __ASSEMBLY__ +#include <linux/reboot.h> + struct i2c_gpio_platform_data; struct i2c_board_info; struct spi_board_info; @@ -55,7 +57,7 @@ void ep93xx_ide_release_gpio(struct platform_device *pdev); void ep93xx_init_devices(void); extern void ep93xx_timer_init(void); -void ep93xx_restart(char, const char *); +void ep93xx_restart(enum reboot_mode, const char *); void ep93xx_init_late(void); #ifdef CONFIG_CRUNCH diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index f5f65b58181e..855d4a7b462d 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -38,7 +38,7 @@ config CPU_EXYNOS4210 depends on ARCH_EXYNOS4 select ARM_CPU_SUSPEND if PM select PINCTRL_EXYNOS - select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS if PM select S5P_PM if PM select S5P_SLEEP if PM select SAMSUNG_DMADEV diff --git a/arch/arm/mach-exynos/common.c 
b/arch/arm/mach-exynos/common.c index 2c655db4b78e..164685bd25c8 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -285,12 +285,12 @@ static struct map_desc exynos5440_iodesc0[] __initdata = { }, }; -void exynos4_restart(char mode, const char *cmd) +void exynos4_restart(enum reboot_mode mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); } -void exynos5_restart(char mode, const char *cmd) +void exynos5_restart(enum reboot_mode mode, const char *cmd) { struct device_node *np; u32 val; diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 38d45fd23be4..3e156bcddcb4 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -12,6 +12,7 @@ #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H #define __ARCH_ARM_MACH_EXYNOS_COMMON_H +#include <linux/reboot.h> #include <linux/of.h> void mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1); @@ -20,8 +21,8 @@ extern unsigned long xxti_f, xusbxti_f; struct map_desc; void exynos_init_io(void); -void exynos4_restart(char mode, const char *cmd); -void exynos5_restart(char mode, const char *cmd); +void exynos4_restart(enum reboot_mode mode, const char *cmd); +void exynos5_restart(enum reboot_mode mode, const char *cmd); void exynos_init_late(void); /* ToDo: remove these after migrating legacy exynos4 platforms to dt */ diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c index 6987a09ec219..9669cc0b6318 100644 --- a/arch/arm/mach-footbridge/cats-hw.c +++ b/arch/arm/mach-footbridge/cats-hw.c @@ -86,7 +86,7 @@ fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) MACHINE_START(CATS, "Chalice-CATS") /* Maintainer: Philip Blundell */ .atag_offset = 0x100, - .restart_mode = 's', + .reboot_mode = REBOOT_SOFT, .fixup = fixup_cats, .map_io = footbridge_map_io, .init_irq = footbridge_init_irq, diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index a42b369bc439..2739ca2c1334 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -198,9 +198,9 @@ void __init footbridge_map_io(void) } } -void footbridge_restart(char mode, const char *cmd) +void footbridge_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into the ROM */ soft_restart(0x41000000); } else { diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h index a846e50a07b8..56607b3a773e 100644 --- a/arch/arm/mach-footbridge/common.h +++ b/arch/arm/mach-footbridge/common.h @@ -1,3 +1,4 @@ +#include <linux/reboot.h> extern void footbridge_timer_init(void); extern void isa_timer_init(void); @@ -8,4 +9,4 @@ extern void footbridge_map_io(void); extern void footbridge_init_irq(void); extern void isa_init_irq(unsigned int irq); -extern void footbridge_restart(char, const char *); +extern void footbridge_restart(enum reboot_mode, const char *); diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index 90ea23fdce4c..1fd2cf097e30 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c @@ -634,9 +634,9 @@ fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) #endif } -static void netwinder_restart(char mode, const char *cmd) +static void netwinder_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into the ROM */ soft_restart(0x41000000); } else { diff --git 
a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h index 3f65206a9b92..aea1ec5ab6f8 100644 --- a/arch/arm/mach-highbank/core.h +++ b/arch/arm/mach-highbank/core.h @@ -1,8 +1,10 @@ #ifndef __HIGHBANK_CORE_H #define __HIGHBANK_CORE_H +#include <linux/reboot.h> + extern void highbank_set_cpu_jump(int cpu, void *jump_addr); -extern void highbank_restart(char, const char *); +extern void highbank_restart(enum reboot_mode, const char *); extern void __iomem *scu_base_addr; #ifdef CONFIG_PM_SLEEP diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c index 37d8384dcf19..2df5870b7583 100644 --- a/arch/arm/mach-highbank/system.c +++ b/arch/arm/mach-highbank/system.c @@ -15,13 +15,14 @@ */ #include <linux/io.h> #include <asm/proc-fns.h> +#include <linux/reboot.h> #include "core.h" #include "sysregs.h" -void highbank_restart(char mode, const char *cmd) +void highbank_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 'h') + if (mode == REBOOT_HARD) highbank_set_pwr_hard_reset(); else highbank_set_pwr_soft_reset(); diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 60661a4b0e24..f54656091a9d 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -108,7 +108,6 @@ config SOC_IMX25 select ARCH_MXC_IOMUX_V3 select COMMON_CLK select CPU_ARM926T - select HAVE_CAN_FLEXCAN if CAN select MXC_AVIC config SOC_IMX27 @@ -134,7 +133,6 @@ config SOC_IMX35 select ARCH_MXC_IOMUX_V3 select COMMON_CLK select CPU_V6K - select HAVE_CAN_FLEXCAN if CAN select HAVE_EPIT select MXC_AVIC select SMP_ON_UP if SMP @@ -774,7 +772,6 @@ comment "Device tree only" config SOC_IMX53 bool "i.MX53 support" - select HAVE_CAN_FLEXCAN if CAN select HAVE_IMX_SRC select IMX_HAVE_PLATFORM_IMX2_WDT select PINCTRL @@ -797,7 +794,6 @@ config SOC_IMX6Q select CPU_V7 select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if LOCAL_TIMERS - select HAVE_CAN_FLEXCAN if CAN select HAVE_IMX_ANATOP select HAVE_IMX_GPC select HAVE_IMX_MMDC diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index ee78847abf47..cb6c838b63ed 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -11,6 +11,8 @@ #ifndef __ASM_ARCH_MXC_COMMON_H__ #define __ASM_ARCH_MXC_COMMON_H__ +#include <linux/reboot.h> + struct platform_device; struct pt_regs; struct clk; @@ -71,7 +73,7 @@ extern int mx53_clocks_init_dt(void); extern struct platform_device *mxc_register_gpio(char *name, int id, resource_size_t iobase, resource_size_t iosize, int irq, int irq_high); extern void mxc_set_cpu_type(unsigned int type); -extern void mxc_restart(char, const char *); +extern void mxc_restart(enum reboot_mode, const char *); extern void mxc_arch_reset_init(void __iomem *); extern void mxc_arch_reset_init_dt(void); extern int mx53_revision(void); diff --git a/arch/arm/mach-imx/devices-imx25.h b/arch/arm/mach-imx/devices-imx25.h index 0d2922bc575c..769563fdeaa0 100644 --- a/arch/arm/mach-imx/devices-imx25.h +++ b/arch/arm/mach-imx/devices-imx25.h @@ -13,10 +13,10 @@ extern const struct imx_fec_data imx25_fec_data; imx_add_fec(&imx25_fec_data, pdata) extern const struct imx_flexcan_data imx25_flexcan_data[]; -#define imx25_add_flexcan(id, pdata) \ - imx_add_flexcan(&imx25_flexcan_data[id], pdata) -#define imx25_add_flexcan0(pdata) imx25_add_flexcan(0, pdata) -#define imx25_add_flexcan1(pdata) imx25_add_flexcan(1, pdata) +#define imx25_add_flexcan(id) \ + imx_add_flexcan(&imx25_flexcan_data[id]) +#define imx25_add_flexcan0() imx25_add_flexcan(0) +#define imx25_add_flexcan1() 
imx25_add_flexcan(1) extern const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data; #define imx25_add_fsl_usb2_udc(pdata) \ diff --git a/arch/arm/mach-imx/devices-imx35.h b/arch/arm/mach-imx/devices-imx35.h index e2675f1b141c..780d8240281b 100644 --- a/arch/arm/mach-imx/devices-imx35.h +++ b/arch/arm/mach-imx/devices-imx35.h @@ -17,10 +17,10 @@ extern const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data; imx_add_fsl_usb2_udc(&imx35_fsl_usb2_udc_data, pdata) extern const struct imx_flexcan_data imx35_flexcan_data[]; -#define imx35_add_flexcan(id, pdata) \ - imx_add_flexcan(&imx35_flexcan_data[id], pdata) -#define imx35_add_flexcan0(pdata) imx35_add_flexcan(0, pdata) -#define imx35_add_flexcan1(pdata) imx35_add_flexcan(1, pdata) +#define imx35_add_flexcan(id) \ + imx_add_flexcan(&imx35_flexcan_data[id]) +#define imx35_add_flexcan0() imx35_add_flexcan(0) +#define imx35_add_flexcan1() imx35_add_flexcan(1) extern const struct imx_imx2_wdt_data imx35_imx2_wdt_data; #define imx35_add_imx2_wdt() \ diff --git a/arch/arm/mach-imx/devices/Kconfig b/arch/arm/mach-imx/devices/Kconfig index 3dd2b1b041d1..68c74fb0373c 100644 --- a/arch/arm/mach-imx/devices/Kconfig +++ b/arch/arm/mach-imx/devices/Kconfig @@ -4,7 +4,6 @@ config IMX_HAVE_PLATFORM_FEC config IMX_HAVE_PLATFORM_FLEXCAN bool - select HAVE_CAN_FLEXCAN if CAN config IMX_HAVE_PLATFORM_FSL_USB2_UDC bool diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h index 453e20bc2657..c13b76b9f6b3 100644 --- a/arch/arm/mach-imx/devices/devices-common.h +++ b/arch/arm/mach-imx/devices/devices-common.h @@ -50,7 +50,6 @@ struct platform_device *__init imx_add_fec( const struct imx_fec_data *data, const struct fec_platform_data *pdata); -#include <linux/can/platform/flexcan.h> struct imx_flexcan_data { int id; resource_size_t iobase; @@ -58,8 +57,7 @@ struct imx_flexcan_data { resource_size_t irq; }; struct platform_device *__init imx_add_flexcan( - const struct imx_flexcan_data *data, - const struct flexcan_platform_data *pdata); + const struct imx_flexcan_data *data); #include <linux/fsl_devices.h> struct imx_fsl_usb2_udc_data { diff --git a/arch/arm/mach-imx/devices/platform-flexcan.c b/arch/arm/mach-imx/devices/platform-flexcan.c index 1078bf0a94ef..55d61eaf63c6 100644 --- a/arch/arm/mach-imx/devices/platform-flexcan.c +++ b/arch/arm/mach-imx/devices/platform-flexcan.c @@ -38,8 +38,7 @@ const struct imx_flexcan_data imx35_flexcan_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX35 */ struct platform_device *__init imx_add_flexcan( - const struct imx_flexcan_data *data, - const struct flexcan_platform_data *pdata) + const struct imx_flexcan_data *data) { struct resource res[] = { { @@ -54,5 +53,5 @@ struct platform_device *__init imx_add_flexcan( }; return imx_add_platform_device("flexcan", data->id, - res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); + res, ARRAY_SIZE(res), NULL, 0); } diff --git a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c index e2b70f4c1a2c..e77cc3af6db2 100644 --- a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c +++ b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c @@ -279,7 +279,7 @@ void __init eukrea_mbimxsd25_baseboard_init(void) imx25_add_imx_fb(&eukrea_mximxsd_fb_pdata); imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata); - imx25_add_flexcan1(NULL); + imx25_add_flexcan1(); imx25_add_sdhci_esdhc_imx(0, &sd1_pdata); gpio_request(GPIO_LED1, "LED1"); diff --git a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c 
b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c index 5a2d5ef12dd5..14d6c8249b76 100644 --- a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c +++ b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c @@ -287,7 +287,7 @@ void __init eukrea_mbimxsd35_baseboard_init(void) imx35_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata); - imx35_add_flexcan1(NULL); + imx35_add_flexcan1(); imx35_add_sdhci_esdhc_imx(0, &sd1_pdata); gpio_request(GPIO_LED1, "LED1"); diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index f5965220a4d8..7be13f8e69a0 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -27,6 +27,7 @@ #include <linux/of_platform.h> #include <linux/opp.h> #include <linux/phy.h> +#include <linux/reboot.h> #include <linux/regmap.h> #include <linux/micrel_phy.h> #include <linux/mfd/syscon.h> @@ -67,7 +68,7 @@ static void __init imx6q_init_revision(void) mxc_set_cpu_type(rev >> 16 & 0xff); } -static void imx6q_restart(char mode, const char *cmd) +static void imx6q_restart(enum reboot_mode mode, const char *cmd) { struct device_node *np; void __iomem *wdog_base; diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index 8bcda688a006..13490c203050 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c @@ -249,7 +249,7 @@ static void __init mx25pdk_init(void) imx25_add_imx_i2c0(&mx25_3ds_i2c0_data); gpio_request_one(MX25PDK_CAN_PWDN, GPIOF_OUT_INIT_LOW, "can-pwdn"); - imx25_add_flexcan0(NULL); + imx25_add_flexcan0(); } static void __init mx25pdk_timer_init(void) diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index dae4cd7be040..6f424eced181 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -268,10 +268,11 @@ static struct mc13xxx_led_platform_data moboard_led[] = { static struct mc13xxx_leds_platform_data moboard_leds = { .num_leds = ARRAY_SIZE(moboard_led), .led = moboard_led, - .flags = MC13783_LED_SLEWLIMTC, - .abmode = MC13783_LED_AB_DISABLED, - .tc1_period = MC13783_LED_PERIOD_10MS, - .tc2_period = MC13783_LED_PERIOD_10MS, + .led_control[0] = MC13783_LED_C0_ENABLE | MC13783_LED_C0_ABMODE(0), + .led_control[1] = MC13783_LED_C1_SLEWLIM, + .led_control[2] = MC13783_LED_C2_SLEWLIM, + .led_control[3] = MC13783_LED_C3_PERIOD(0), + .led_control[4] = MC13783_LED_C3_PERIOD(0), }; static struct mc13xxx_buttons_platform_data moboard_buttons = { diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c index 8ed533f0f8ca..b726cb1c5fdd 100644 --- a/arch/arm/mach-imx/mach-pcm043.c +++ b/arch/arm/mach-imx/mach-pcm043.c @@ -385,7 +385,7 @@ static void __init pcm043_init(void) if (!otg_mode_host) imx35_add_fsl_usb2_udc(&otg_device_pdata); - imx35_add_flexcan1(NULL); + imx35_add_flexcan1(); imx35_add_sdhci_esdhc_imx(0, &sd1_pdata); } diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c index 7cdc79a9657c..6fe81bb4d3c9 100644 --- a/arch/arm/mach-imx/system.c +++ b/arch/arm/mach-imx/system.c @@ -37,7 +37,7 @@ static struct clk *wdog_clk; /* * Reset the system. It is called by machine_restart(). 
*/ -void mxc_restart(char mode, const char *cmd) +void mxc_restart(enum reboot_mode mode, const char *cmd) { unsigned int wcr_enable; diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index fea91313678b..cd46529e9eaa 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c @@ -26,8 +26,8 @@ #include <linux/clockchips.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/mach/time.h> #include "common.h" diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h index 72516658be1e..ad0ac5547b2c 100644 --- a/arch/arm/mach-integrator/common.h +++ b/arch/arm/mach-integrator/common.h @@ -1,7 +1,8 @@ +#include <linux/reboot.h> #include <linux/amba/serial.h> extern struct amba_pl010_data ap_uart_data; void integrator_init_early(void); int integrator_init(bool is_cp); void integrator_reserve(void); -void integrator_restart(char, const char *); +void integrator_restart(enum reboot_mode, const char *); void integrator_init_sysfs(struct device *parent, u32 id); diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 81461d218717..4cdfd7365925 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -124,7 +124,7 @@ void __init integrator_reserve(void) /* * To reset, we hit the on-board reset register in the system FPGA */ -void integrator_restart(char mode, const char *cmd) +void integrator_restart(enum reboot_mode mode, const char *cmd) { cm_control(CM_CTRL_RESET, CM_CTRL_RESET); } diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index a5b15c4e8def..d9e95e612fcb 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -41,6 +41,7 @@ #include <linux/stat.h> #include <linux/sys_soc.h> #include <linux/termios.h> +#include <linux/sched_clock.h> #include <mach/hardware.h> #include <mach/platform.h> @@ -48,7 +49,6 @@ #include <asm/setup.h> #include <asm/param.h> /* HZ */ #include <asm/mach-types.h> -#include <asm/sched_clock.h> #include <mach/lm.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h index 7480f58267aa..17b40279e0a4 100644 --- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h +++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h @@ -2,6 +2,9 @@ #define _IOP13XX_HW_H_ #ifndef __ASSEMBLY__ + +#include <linux/reboot.h> + /* The ATU offsets can change based on the strapping */ extern u32 iop13xx_atux_pmmr_offset; extern u32 iop13xx_atue_pmmr_offset; @@ -11,7 +14,7 @@ void iop13xx_map_io(void); void iop13xx_platform_init(void); void iop13xx_add_tpmi_devices(void); void iop13xx_init_irq(void); -void iop13xx_restart(char, const char *); +void iop13xx_restart(enum reboot_mode, const char *); /* CPUID CP6 R0 Page 0 */ static inline int iop13xx_cpu_id(void) diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 3181f61ea63e..96e6c7a6793b 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -469,7 +469,6 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); - dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_1: @@ -479,7 +478,6 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMCPY, 
plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); - dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_2: @@ -489,7 +487,6 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); - dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); @@ -597,7 +594,7 @@ __setup("iop13xx_init_adma", iop13xx_init_adma_setup); __setup("iop13xx_init_uart", iop13xx_init_uart_setup); __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup); -void iop13xx_restart(char mode, const char *cmd) +void iop13xx_restart(enum reboot_mode mode, const char *cmd) { /* * Reset the internal bus (warning both cores are reset) diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index ea0984a7449e..069144300b77 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -286,7 +286,7 @@ static void n2100_power_off(void) ; } -static void n2100_restart(char mode, const char *cmd) +static void n2100_restart(enum reboot_mode mode, const char *cmd) { gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW); gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT); diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index d7223b3b81f3..5327decde5a0 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -30,6 +30,7 @@ #include <linux/export.h> #include <linux/gpio.h> #include <linux/cpu.h> +#include <linux/sched_clock.h> #include <mach/udc.h> #include <mach/hardware.h> @@ -38,7 +39,6 @@ #include <asm/pgtable.h> #include <asm/page.h> #include <asm/irq.h> -#include <asm/sched_clock.h> #include <asm/system_misc.h> #include <asm/mach/map.h> @@ -531,9 +531,9 @@ static void __init ixp4xx_clockevent_init(void) 0xf, 0xfffffffe); } -void ixp4xx_restart(char mode, const char *cmd) +void ixp4xx_restart(enum reboot_mode mode, const char *cmd) { - if ( 1 && mode == 's') { + if ( 1 && mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index 5d413f8c5700..63de1b3fd06b 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c @@ -27,6 +27,8 @@ #include <linux/i2c.h> #include <linux/i2c-gpio.h> +#include <mach/hardware.h> + #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h index db5afb69c123..4c4c6a6f4526 100644 --- a/arch/arm/mach-ixp4xx/include/mach/platform.h +++ b/arch/arm/mach-ixp4xx/include/mach/platform.h @@ -13,6 +13,8 @@ #ifndef __ASSEMBLY__ +#include <linux/reboot.h> + #include <asm/types.h> #ifndef __ARMEB__ @@ -123,7 +125,7 @@ extern void ixp4xx_init_early(void); extern void ixp4xx_init_irq(void); extern void ixp4xx_sys_init(void); extern void ixp4xx_timer_init(void); -extern void ixp4xx_restart(char, const char *); +extern void ixp4xx_restart(enum reboot_mode, const char *); extern void ixp4xx_pci_preinit(void); struct pci_sys_data; extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); diff --git a/arch/arm/mach-ixp4xx/include/mach/timex.h b/arch/arm/mach-ixp4xx/include/mach/timex.h index 
c9e930f29339..0396d89f947c 100644 --- a/arch/arm/mach-ixp4xx/include/mach/timex.h +++ b/arch/arm/mach-ixp4xx/include/mach/timex.h @@ -3,7 +3,7 @@ * */ -#include <mach/hardware.h> +#include <mach/ixp4xx-regs.h> /* * We use IXP425 General purpose timer for our timer needs, it runs at diff --git a/arch/arm/mach-ixp4xx/omixp-setup.c b/arch/arm/mach-ixp4xx/omixp-setup.c index 46a89f5e8269..75ef03dc9964 100644 --- a/arch/arm/mach-ixp4xx/omixp-setup.c +++ b/arch/arm/mach-ixp4xx/omixp-setup.c @@ -27,6 +27,8 @@ #include <asm/mach/arch.h> #include <asm/mach/flash.h> +#include <mach/hardware.h> + static struct resource omixp_flash_resources[] = { { .flags = IORESOURCE_MEM, diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 7c72c725b711..e9238b5567ee 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -20,6 +20,7 @@ #include <linux/mv643xx_i2c.h> #include <linux/timex.h> #include <linux/kexec.h> +#include <linux/reboot.h> #include <net/dsa.h> #include <asm/page.h> #include <asm/mach/map.h> @@ -722,7 +723,7 @@ void __init kirkwood_init(void) #endif } -void kirkwood_restart(char mode, const char *cmd) +void kirkwood_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h index 1c09f3f93fbb..fcf3ba682e24 100644 --- a/arch/arm/mach-kirkwood/common.h +++ b/arch/arm/mach-kirkwood/common.h @@ -11,6 +11,8 @@ #ifndef __ARCH_KIRKWOOD_COMMON_H #define __ARCH_KIRKWOOD_COMMON_H +#include <linux/reboot.h> + struct dsa_platform_data; struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -53,7 +55,7 @@ void kirkwood_audio_init(void); void kirkwood_cpuidle_init(void); void kirkwood_cpufreq_init(void); -void kirkwood_restart(char, const char *); +void kirkwood_restart(enum reboot_mode, const char *); void kirkwood_clk_init(void); /* board init functions for boards not fully converted to fdt */ diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h index d4cbe5e81bb4..91242c944d7a 100644 --- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h +++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h @@ -21,14 +21,12 @@ #define CPU_RESET 0x00000002 #define RSTOUTn_MASK (BRIDGE_VIRT_BASE + 0x0108) -#define WDT_RESET_OUT_EN 0x00000002 #define SOFT_RESET_OUT_EN 0x00000004 #define SYSTEM_SOFT_RESET (BRIDGE_VIRT_BASE + 0x010c) #define SOFT_RESET 0x00000001 #define BRIDGE_CAUSE (BRIDGE_VIRT_BASE + 0x0110) -#define WDT_INT_REQ 0x0008 #define BRIDGE_INT_TIMER1_CLR (~0x0004) diff --git a/arch/arm/mach-ks8695/generic.h b/arch/arm/mach-ks8695/generic.h index 6e97ce462d73..43253f8e6de4 100644 --- a/arch/arm/mach-ks8695/generic.h +++ b/arch/arm/mach-ks8695/generic.h @@ -12,5 +12,5 @@ extern __init void ks8695_map_io(void); extern __init void ks8695_init_irq(void); -extern void ks8695_restart(char, const char *); +extern void ks8695_restart(enum reboot_mode, const char *); extern void ks8695_timer_init(void); diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c index c272a3863d5f..426c97662f5b 100644 --- a/arch/arm/mach-ks8695/time.c +++ b/arch/arm/mach-ks8695/time.c @@ -154,11 +154,11 @@ void __init ks8695_timer_init(void) setup_irq(KS8695_IRQ_TIMER1, &ks8695_timer_irq); } -void ks8695_restart(char mode, const char *cmd) +void ks8695_restart(enum reboot_mode reboot_mode, const char *cmd) { unsigned int reg; - if (mode == 's') + if (reboot_mode == REBOOT_SOFT) 
soft_restart(0); /* disable timer0 */ diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c index 0d4db8c544b5..d7aa54c25c59 100644 --- a/arch/arm/mach-lpc32xx/common.c +++ b/arch/arm/mach-lpc32xx/common.c @@ -207,11 +207,11 @@ void __init lpc32xx_map_io(void) iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc)); } -void lpc23xx_restart(char mode, const char *cmd) +void lpc23xx_restart(enum reboot_mode mode, const char *cmd) { switch (mode) { - case 's': - case 'h': + case REBOOT_SOFT: + case REBOOT_HARD: lpc32xx_watchdog_reset(); break; diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h index e0b26062a272..1cd8853b2f9b 100644 --- a/arch/arm/mach-lpc32xx/common.h +++ b/arch/arm/mach-lpc32xx/common.h @@ -21,6 +21,7 @@ #include <mach/board.h> #include <linux/platform_device.h> +#include <linux/reboot.h> /* * Other arch specific structures and functions @@ -29,7 +30,7 @@ extern void lpc32xx_timer_init(void); extern void __init lpc32xx_init_irq(void); extern void __init lpc32xx_map_io(void); extern void __init lpc32xx_serial_init(void); -extern void lpc23xx_restart(char, const char *); +extern void lpc23xx_restart(enum reboot_mode, const char *); /* diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index c1cd5a943ab1..e54f87ec2e4a 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -182,8 +182,8 @@ static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch) static struct pl08x_platform_data pl08x_pd = { .slave_channels = &pl08x_slave_channels[0], .num_slave_channels = ARRAY_SIZE(pl08x_slave_channels), - .get_signal = pl08x_get_signal, - .put_signal = pl08x_put_signal, + .get_xfer_signal = pl08x_get_signal, + .put_xfer_signal = pl08x_put_signal, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, }; diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c index 5b660ec09ef5..0c002099c3a3 100644 --- a/arch/arm/mach-mmp/aspenite.c +++ b/arch/arm/mach-mmp/aspenite.c @@ -210,7 +210,7 @@ struct pxa168fb_mach_info aspenite_lcd_info = { .invert_pixclock = 0, }; -static unsigned int aspenite_matrix_key_map[] = { +static const unsigned int aspenite_matrix_key_map[] = { KEY(0, 6, KEY_UP), /* SW 4 */ KEY(0, 7, KEY_DOWN), /* SW 5 */ KEY(1, 6, KEY_LEFT), /* SW 6 */ @@ -219,11 +219,15 @@ static unsigned int aspenite_matrix_key_map[] = { KEY(4, 7, KEY_ESC), /* SW 9 */ }; +static struct matrix_keymap_data aspenite_matrix_keymap_data = { + .keymap = aspenite_matrix_key_map, + .keymap_size = ARRAY_SIZE(aspenite_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = { .matrix_key_rows = 5, .matrix_key_cols = 8, - .matrix_key_map = aspenite_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(aspenite_matrix_key_map), + .matrix_keymap_data = &aspenite_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-mmp/common.c b/arch/arm/mach-mmp/common.c index 9292b7966e3b..c03b4ab582db 100644 --- a/arch/arm/mach-mmp/common.c +++ b/arch/arm/mach-mmp/common.c @@ -47,7 +47,7 @@ void __init mmp_map_io(void) mmp_chip_id = __raw_readl(MMP_CHIPID); } -void mmp_restart(char mode, const char *cmd) +void mmp_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0); } diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h index 0bdc50b134ce..991d7e9877de 100644 --- a/arch/arm/mach-mmp/common.h +++ b/arch/arm/mach-mmp/common.h @@ -1,10 +1,11 @@ +#include <linux/reboot.h> #define ARRAY_AND_SIZE(x) (x), 
ARRAY_SIZE(x) extern void timer_init(int irq); extern void __init icu_init_irq(void); extern void __init mmp_map_io(void); -extern void mmp_restart(char, const char *); +extern void mmp_restart(enum reboot_mode, const char *); extern void __init pxa168_clk_init(void); extern void __init pxa910_clk_init(void); extern void __init mmp2_clk_init(void); diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h index 7ed1df21ea1c..459c2d03eb5c 100644 --- a/arch/arm/mach-mmp/include/mach/pxa168.h +++ b/arch/arm/mach-mmp/include/mach/pxa168.h @@ -1,9 +1,11 @@ #ifndef __ASM_MACH_PXA168_H #define __ASM_MACH_PXA168_H +#include <linux/reboot.h> + extern void pxa168_timer_init(void); extern void __init pxa168_init_irq(void); -extern void pxa168_restart(char, const char *); +extern void pxa168_restart(enum reboot_mode, const char *); extern void pxa168_clear_keypad_wakeup(void); #include <linux/i2c.h> diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index a30dcf3b7d9e..144e997624c0 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -172,7 +172,7 @@ int __init pxa168_add_usb_host(struct mv_usb_platform_data *pdata) return platform_device_register(&pxa168_device_usb_host); } -void pxa168_restart(char mode, const char *cmd) +void pxa168_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0xffff0000); } diff --git a/arch/arm/mach-mmp/teton_bga.c b/arch/arm/mach-mmp/teton_bga.c index e4d95b4c6bb2..6aa53fb29d26 100644 --- a/arch/arm/mach-mmp/teton_bga.c +++ b/arch/arm/mach-mmp/teton_bga.c @@ -61,11 +61,15 @@ static unsigned int teton_bga_matrix_key_map[] = { KEY(1, 7, KEY_RIGHT), }; +static struct matrix_keymap_data teton_bga_matrix_keymap_data = { + .keymap = teton_bga_matrix_key_map, + .keymap_size = ARRAY_SIZE(teton_bga_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data teton_bga_keypad_info __initdata = { .matrix_key_rows = 2, .matrix_key_cols = 8, - .matrix_key_map = teton_bga_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(teton_bga_matrix_key_map), + .matrix_keymap_data = &teton_bga_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 86a18b3d252e..7ac41e83cfef 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -28,8 +28,8 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <mach/addr-map.h> #include <mach/regs-timers.h> #include <mach/regs-apbc.h> diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 284313f3e02c..b6418fd5fe0d 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -23,10 +23,10 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> #include <asm/localtimer.h> -#include <asm/sched_clock.h> #include "common.h" diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index 749a7f8c4992..75062eff2494 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c @@ -413,7 +413,7 @@ void __init mv78xx0_init(void) clk_init(); } -void mv78xx0_restart(char mode, const char *cmd) +void mv78xx0_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. 
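The timer-related hunks in this stretch show the other recurring change: the ARM-private sched_clock glue (asm/sched_clock.h and arch/arm/kernel/sched_clock.c, deleted earlier in this diff) is replaced by the generic <linux/sched_clock.h>, and sched_clock_postinit() is no longer called from the ARM time_init(). Registration by a timer driver keeps the same shape; the sketch below is illustrative only (foo_timer_base and FOO_COUNT_REG are hypothetical, and it assumes the 32-bit setup_sched_clock() interface carried over from the deleted header):

/*
 * Minimal sketch, not taken from this merge: register a free-running
 * 32-bit up-counter as the sched_clock source via the generic header.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>		/* was <asm/sched_clock.h> before this series */

#define FOO_COUNT_REG	0x04		/* hypothetical counter register offset */

static void __iomem *foo_timer_base;	/* ioremapped during timer init, elsewhere */

static u32 notrace foo_read_sched_clock(void)
{
	return readl_relaxed(foo_timer_base + FOO_COUNT_REG);
}

static void __init foo_timer_init(void)
{
	/* 32-bit counter ticking at an example 24 MHz */
	setup_sched_clock(foo_read_sched_clock, 32, 24000000);
}

Because the read callback and bit width are unchanged, converted drivers in this diff only swap the include; the wrap-handling timer, epoch updates and suspend/resume hooks that used to live in arch/arm/kernel/sched_clock.c are now provided by the generic code.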
diff --git a/arch/arm/mach-mv78xx0/common.h b/arch/arm/mach-mv78xx0/common.h index 5e9485bad0ac..6889af26077d 100644 --- a/arch/arm/mach-mv78xx0/common.h +++ b/arch/arm/mach-mv78xx0/common.h @@ -11,6 +11,8 @@ #ifndef __ARCH_MV78XX0_COMMON_H #define __ARCH_MV78XX0_COMMON_H +#include <linux/reboot.h> + struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -45,7 +47,7 @@ void mv78xx0_uart1_init(void); void mv78xx0_uart2_init(void); void mv78xx0_uart3_init(void); void mv78xx0_i2c_init(void); -void mv78xx0_restart(char, const char *); +void mv78xx0_restart(enum reboot_mode, const char *); extern void mv78xx0_timer_init(void); diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h index 98defd5e92cd..e366010e1d91 100644 --- a/arch/arm/mach-mvebu/common.h +++ b/arch/arm/mach-mvebu/common.h @@ -17,7 +17,9 @@ #define ARMADA_XP_MAX_CPUS 4 -void mvebu_restart(char mode, const char *cmd); +#include <linux/reboot.h> + +void mvebu_restart(enum reboot_mode mode, const char *cmd); void armada_370_xp_init_irq(void); void armada_370_xp_handle_irq(struct pt_regs *regs); diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c index b8079df8c986..f875124ff4f9 100644 --- a/arch/arm/mach-mvebu/system-controller.c +++ b/arch/arm/mach-mvebu/system-controller.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/of_address.h> #include <linux/io.h> +#include <linux/reboot.h> static void __iomem *system_controller_base; @@ -63,7 +64,7 @@ static struct of_device_id of_system_controller_table[] = { { /* end of list */ }, }; -void mvebu_restart(char mode, const char *cmd) +void mvebu_restart(enum reboot_mode mode, const char *cmd) { if (!system_controller_base) { pr_err("Cannot restart, system-controller not available: check the device tree\n"); diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig index 616fe0210da1..8cde9e05b5d6 100644 --- a/arch/arm/mach-mxs/Kconfig +++ b/arch/arm/mach-mxs/Kconfig @@ -10,7 +10,6 @@ config SOC_IMX28 select ARM_AMBA select ARM_CPU_SUSPEND if PM select CPU_ARM926T - select HAVE_CAN_FLEXCAN if CAN select PINCTRL_IMX28 config ARCH_MXS diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 7fa611c1b287..4ce27b536dc9 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -14,12 +14,12 @@ #include <linux/clk/mxs.h> #include <linux/clkdev.h> #include <linux/clocksource.h> -#include <linux/can/platform/flexcan.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/irqchip/mxs.h> +#include <linux/reboot.h> #include <linux/micrel_phy.h> #include <linux/of_address.h> #include <linux/of_platform.h> @@ -76,41 +76,6 @@ static inline void __mxs_togl(u32 mask, void __iomem *reg) __raw_writel(mask, reg + MXS_TOG_ADDR); } -/* - * MX28EVK_FLEXCAN_SWITCH is shared between both flexcan controllers - */ -#define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13) - -static int flexcan0_en, flexcan1_en; - -static void mx28evk_flexcan_switch(void) -{ - if (flexcan0_en || flexcan1_en) - gpio_set_value(MX28EVK_FLEXCAN_SWITCH, 1); - else - gpio_set_value(MX28EVK_FLEXCAN_SWITCH, 0); -} - -static void mx28evk_flexcan0_switch(int enable) -{ - flexcan0_en = enable; - mx28evk_flexcan_switch(); -} - -static void mx28evk_flexcan1_switch(int enable) -{ - flexcan1_en = enable; - mx28evk_flexcan_switch(); -} - -static struct flexcan_platform_data flexcan_pdata[2]; - -static struct of_dev_auxdata mxs_auxdata_lookup[] __initdata = { - 
OF_DEV_AUXDATA("fsl,imx28-flexcan", 0x80032000, NULL, &flexcan_pdata[0]), - OF_DEV_AUXDATA("fsl,imx28-flexcan", 0x80034000, NULL, &flexcan_pdata[1]), - { /* sentinel */ } -}; - #define OCOTP_WORD_OFFSET 0x20 #define OCOTP_WORD_COUNT 0x20 @@ -270,15 +235,6 @@ static void __init imx28_evk_init(void) mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } -static void __init imx28_evk_post_init(void) -{ - if (!gpio_request_one(MX28EVK_FLEXCAN_SWITCH, GPIOF_DIR_OUT, - "flexcan-switch")) { - flexcan_pdata[0].transceiver_switch = mx28evk_flexcan0_switch; - flexcan_pdata[1].transceiver_switch = mx28evk_flexcan1_switch; - } -} - static int apx4devkit_phy_fixup(struct phy_device *phy) { phy->dev_flags |= MICREL_PHY_50MHZ_CLK; @@ -484,13 +440,10 @@ static void __init mxs_machine_init(void) crystalfontz_init(); of_platform_populate(NULL, of_default_bus_match_table, - mxs_auxdata_lookup, parent); + NULL, parent); if (of_machine_is_compatible("karo,tx28")) tx28_post_init(); - - if (of_machine_is_compatible("fsl,imx28-evk")) - imx28_evk_post_init(); } #define MX23_CLKCTRL_RESET_OFFSET 0x120 @@ -500,7 +453,7 @@ static void __init mxs_machine_init(void) /* * Reset the system. It is called by machine_restart(). */ -static void mxs_restart(char mode, const char *cmd) +static void mxs_restart(enum reboot_mode mode, const char *cmd) { struct device_node *np; void __iomem *reset_addr; diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 1504b68f4c66..db25b0cef3a7 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c @@ -24,6 +24,7 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/irqchip/arm-vic.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/mach/map.h> #include <mach/netx-regs.h> @@ -187,7 +188,7 @@ static int __init netx_init(void) subsys_initcall(netx_init); -void netx_restart(char mode, const char *cmd) +void netx_restart(enum reboot_mode mode, const char *cmd) { writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES, NETX_SYSTEM_RES_CR); diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h index 768b26bbb42b..bb2ce471cc28 100644 --- a/arch/arm/mach-netx/generic.h +++ b/arch/arm/mach-netx/generic.h @@ -17,8 +17,10 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/reboot.h> + extern void __init netx_map_io(void); extern void __init netx_init_irq(void); -extern void netx_restart(char, const char *); +extern void netx_restart(enum reboot_mode, const char *); extern void netx_timer_init(void); diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c index 2df209ed1a07..13e0df9c11ce 100644 --- a/arch/arm/mach-nomadik/cpu-8815.c +++ b/arch/arm/mach-nomadik/cpu-8815.c @@ -103,7 +103,7 @@ static void __init cpu8815_map_io(void) iotable_init(cpu8815_io_desc, ARRAY_SIZE(cpu8815_io_desc)); } -static void cpu8815_restart(char mode, const char *cmd) +static void cpu8815_restart(enum reboot_mode mode, const char *cmd) { void __iomem *srcbase = ioremap(NOMADIK_SRC_BASE, SZ_4K); diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c index 6c116e1a4b01..4677a9ccb3cb 100644 --- a/arch/arm/mach-omap1/board-voiceblue.c +++ b/arch/arm/mach-omap1/board-voiceblue.c @@ -26,6 +26,7 @@ #include <linux/serial_reg.h> #include <linux/smc91x.h> #include <linux/export.h> +#include <linux/reboot.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -215,7 +216,7 @@ void 
voiceblue_wdt_ping(void) gpio_set_value(0, wdt_gpio_state); } -static void voiceblue_restart(char mode, const char *cmd) +static void voiceblue_restart(enum reboot_mode mode, const char *cmd) { /* * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index 14f7e9920479..abec019a5281 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h @@ -28,6 +28,7 @@ #include <linux/mtd/mtd.h> #include <linux/i2c-omap.h> +#include <linux/reboot.h> #include <plat/i2c.h> @@ -70,7 +71,7 @@ static inline int omap_serial_wakeup_init(void) void omap1_init_early(void); void omap1_init_irq(void); void omap1_init_late(void); -void omap1_restart(char, const char *); +void omap1_restart(enum reboot_mode, const char *); extern void __init omap_check_revision(void); diff --git a/arch/arm/mach-omap1/reset.c b/arch/arm/mach-omap1/reset.c index 5eebd7e889d0..72bf4bf4a702 100644 --- a/arch/arm/mach-omap1/reset.c +++ b/arch/arm/mach-omap1/reset.c @@ -3,6 +3,7 @@ */ #include <linux/kernel.h> #include <linux/io.h> +#include <linux/reboot.h> #include <mach/hardware.h> @@ -22,7 +23,7 @@ #define OMAP_EXTWARM_RST_SRC_ID_SHIFT 5 -void omap1_restart(char mode, const char *cmd) +void omap1_restart(enum reboot_mode mode, const char *cmd) { /* * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 726ec23d29c7..80603d2fef77 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -43,9 +43,9 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> +#include <linux/sched_clock.h> #include <asm/irq.h> -#include <asm/sched_clock.h> #include <mach/hardware.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index c7b32a966f67..627fa7e41fba 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -1,63 +1,10 @@ config ARCH_OMAP bool -config ARCH_OMAP2PLUS - bool "TI OMAP2/3/4/5 SoCs with device tree support" if (ARCH_MULTI_V6 || ARCH_MULTI_V7) - select ARCH_HAS_CPUFREQ - select ARCH_HAS_BANDGAP - select ARCH_HAS_HOLES_MEMORYMODEL - select ARCH_OMAP - select ARCH_REQUIRE_GPIOLIB - select CLKDEV_LOOKUP - select CLKSRC_MMIO - select GENERIC_CLOCKEVENTS - select GENERIC_IRQ_CHIP - select HAVE_CLK - select OMAP_DM_TIMER - select PINCTRL - select PROC_DEVICETREE if PROC_FS - select SOC_BUS - select SPARSE_IRQ - select TI_PRIV_EDMA - select USE_OF - help - Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 - - -if ARCH_OMAP2PLUS - -menu "TI OMAP2/3/4 Specific Features" - -config ARCH_OMAP2PLUS_TYPICAL - bool "Typical OMAP configuration" - default y - select AEABI - select HIGHMEM - select I2C - select I2C_OMAP - select MENELAUS if ARCH_OMAP2 - select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 - select PM_RUNTIME - select REGULATOR - select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 - select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 - select VFP - help - Compile a kernel suitable for booting most boards - -config SOC_HAS_OMAP2_SDRC - bool "OMAP2 SDRAM Controller support" - -config SOC_HAS_REALTIME_COUNTER - bool "Real time free running counter" - depends on SOC_OMAP5 - default y - config ARCH_OMAP2 bool "TI OMAP2" - depends on ARCH_OMAP2PLUS depends on ARCH_MULTI_V6 - default y + select ARCH_OMAP2PLUS select CPU_V6 select MULTI_IRQ_HANDLER select SOC_HAS_OMAP2_SDRC @@ -65,9 +12,8 @@ config ARCH_OMAP2 config ARCH_OMAP3 bool "TI OMAP3" - depends on 
ARCH_OMAP2PLUS depends on ARCH_MULTI_V7 - default y + select ARCH_OMAP2PLUS select ARCH_HAS_OPP select ARM_CPU_SUSPEND if PM select CPU_V7 @@ -81,9 +27,8 @@ config ARCH_OMAP3 config ARCH_OMAP4 bool "TI OMAP4" - default y - depends on ARCH_OMAP2PLUS depends on ARCH_MULTI_V7 + select ARCH_OMAP2PLUS select ARCH_HAS_OPP select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP select ARM_CPU_SUSPEND if PM @@ -108,12 +53,87 @@ config ARCH_OMAP4 config SOC_OMAP5 bool "TI OMAP5" depends on ARCH_MULTI_V7 + select ARCH_OMAP2PLUS select ARM_CPU_SUSPEND if PM select ARM_GIC select CPU_V7 + select HAVE_ARM_SCU if SMP + select HAVE_ARM_TWD if LOCAL_TIMERS select HAVE_SMP select COMMON_CLK select HAVE_ARM_ARCH_TIMER + select ARM_ERRATA_798181 + +config SOC_AM33XX + bool "AM33XX support" + depends on ARCH_MULTI_V7 + select ARCH_OMAP2PLUS + select ARM_CPU_SUSPEND if PM + select CPU_V7 + select MULTI_IRQ_HANDLER + select COMMON_CLK + +config SOC_AM43XX + bool "TI AM43x" + depends on ARCH_MULTI_V7 + select CPU_V7 + select ARCH_OMAP2PLUS + select MULTI_IRQ_HANDLER + select ARM_GIC + select COMMON_CLK + select MACH_OMAP_GENERIC + +config ARCH_OMAP2PLUS + bool + select ARCH_HAS_BANDGAP + select ARCH_HAS_CPUFREQ + select ARCH_HAS_HOLES_MEMORYMODEL + select ARCH_OMAP + select ARCH_REQUIRE_GPIOLIB + select CLKDEV_LOOKUP + select CLKSRC_MMIO + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_CHIP + select HAVE_CLK + select OMAP_DM_TIMER + select PINCTRL + select PROC_DEVICETREE if PROC_FS + select SOC_BUS + select SPARSE_IRQ + select TI_PRIV_EDMA + select USE_OF + help + Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 + + +if ARCH_OMAP2PLUS + +menu "TI OMAP2/3/4 Specific Features" + +config ARCH_OMAP2PLUS_TYPICAL + bool "Typical OMAP configuration" + default y + select AEABI + select HIGHMEM + select I2C + select I2C_OMAP + select MENELAUS if ARCH_OMAP2 + select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 + select PM_RUNTIME + select REGULATOR + select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 + select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 + select VFP + help + Compile a kernel suitable for booting most boards + +config SOC_HAS_OMAP2_SDRC + bool "OMAP2 SDRAM Controller support" + +config SOC_HAS_REALTIME_COUNTER + bool "Real time free running counter" + depends on SOC_OMAP5 + default y comment "OMAP Core Type" depends on ARCH_OMAP2 @@ -142,23 +162,6 @@ config SOC_TI81XX depends on ARCH_OMAP3 default y -config SOC_AM33XX - bool "AM33XX support" - depends on ARCH_MULTI_V7 - default y - select ARM_CPU_SUSPEND if PM - select CPU_V7 - select MULTI_IRQ_HANDLER - select COMMON_CLK - -config SOC_AM43XX - bool "TI AM43x" - select CPU_V7 - select MULTI_IRQ_HANDLER - select ARM_GIC - select COMMON_CLK - select MACH_OMAP_GENERIC - config OMAP_PACKAGE_ZAF bool diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index ea5a27ff9941..d4f671547c37 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -95,10 +95,6 @@ obj-$(CONFIG_POWER_AVS_OMAP_CLASS3) += smartreflex-class3.o AFLAGS_sleep24xx.o :=-Wa,-march=armv6 AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) -ifeq ($(CONFIG_PM_VERBOSE),y) -CFLAGS_pm_bus.o += -DDEBUG -endif - endif ifeq ($(CONFIG_CPU_IDLE),y) diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c index 88e4fa8af031..1eae96212315 100644 --- a/arch/arm/mach-omap2/am33xx-restart.c +++ b/arch/arm/mach-omap2/am33xx-restart.c @@ -6,6 +6,7 @@ * published by the Free Software Foundation. 
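Several timer files in this series (mach-mmp/time.c, mach-msm/timer.c and mach-omap1/time.c above) switch from <asm/sched_clock.h> to the generic <linux/sched_clock.h>. A sketch of a sched_clock provider written against that header, assuming the 32-bit setup_sched_clock() helper of this kernel generation; the base pointer and register offset are made up:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>	/* was <asm/sched_clock.h> */

static void __iomem *mytimer_base;	/* hypothetical, ioremap()ed during timer init */

static u32 notrace mytimer_read_sched_clock(void)
{
	return readl_relaxed(mytimer_base + 0x28);	/* hypothetical free-running counter */
}

static void __init mytimer_sched_clock_init(unsigned long rate)
{
	setup_sched_clock(mytimer_read_sched_clock, 32, rate);
}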
*/ #include <linux/kernel.h> +#include <linux/reboot.h> #include "common.h" #include "prm-regbits-33xx.h" @@ -19,7 +20,7 @@ * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ -void am33xx_restart(char mode, const char *cmd) +void am33xx_restart(enum reboot_mode mode, const char *cmd) { /* TODO: Handle mode and cmd if necessary */ diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index b54562d1235e..87e65dde8e13 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -553,6 +553,37 @@ static struct usbhs_omap_platform_data igep3_usbhs_bdata __initdata = { #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { + /* Display Sub System */ + OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + /* TFP410 PanelBus DVI Transmitte (GPIO_170) */ + OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), { .reg_offset = OMAP_MUX_TERMINATOR }, diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c index bd74f9f6063b..bdd1e3a179e1 100644 --- a/arch/arm/mach-omap2/board-rx51-video.c +++ b/arch/arm/mach-omap2/board-rx51-video.c @@ -61,7 +61,7 @@ static struct omap_dss_board_info rx51_dss_board_info = { static int __init rx51_video_init(void) { - if (!machine_is_nokia_rx51()) + if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900")) return 0; if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) { diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 72cab3f4f16d..dfcc182ecff9 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -31,6 +31,7 @@ #include <linux/i2c.h> #include <linux/i2c/twl.h> #include <linux/i2c-omap.h> +#include <linux/reboot.h> #include <asm/proc-fns.h> @@ -119,33 +120,33 
@@ static inline void omap_soc_device_init(void) #endif #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) -void omap2xxx_restart(char mode, const char *cmd); +void omap2xxx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void omap2xxx_restart(char mode, const char *cmd) +static inline void omap2xxx_restart(enum reboot_mode mode, const char *cmd) { } #endif #ifdef CONFIG_SOC_AM33XX -void am33xx_restart(char mode, const char *cmd); +void am33xx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void am33xx_restart(char mode, const char *cmd) +static inline void am33xx_restart(enum reboot_mode mode, const char *cmd) { } #endif #ifdef CONFIG_ARCH_OMAP3 -void omap3xxx_restart(char mode, const char *cmd); +void omap3xxx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void omap3xxx_restart(char mode, const char *cmd) +static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd) { } #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) -void omap44xx_restart(char mode, const char *cmd); +void omap44xx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void omap44xx_restart(char mode, const char *cmd) +static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd) { } #endif diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index aef96e45cb20..3c1279f27d1f 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -15,7 +15,6 @@ #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> -#include <linux/gpio.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/pinctrl/machine.h> @@ -66,7 +65,7 @@ static int __init omap3_l3_init(void) WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); - return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; + return PTR_RET(pdev); } omap_postcore_initcall(omap3_l3_init); @@ -100,7 +99,7 @@ static int __init omap4_l3_init(void) WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); - return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; + return PTR_RET(pdev); } omap_postcore_initcall(omap4_l3_init); diff --git a/arch/arm/mach-omap2/fb.c b/arch/arm/mach-omap2/fb.c index 190ae493c6ef..2ca33cc0c484 100644 --- a/arch/arm/mach-omap2/fb.c +++ b/arch/arm/mach-omap2/fb.c @@ -83,10 +83,7 @@ static int __init omap_init_vrfb(void) pdev = platform_device_register_resndata(NULL, "omapvrfb", -1, res, num_res, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - else - return 0; + return PTR_RET(pdev); } omap_arch_initcall(omap_init_vrfb); diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 1c7969e965d7..f3fdd6afa213 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1734,7 +1734,7 @@ static int __init omap_gpmc_init(void) pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0); WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); - return IS_ERR(pdev) ? 
PTR_ERR(pdev) : 0; + return PTR_RET(pdev); } omap_postcore_initcall(omap_gpmc_init); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index fe3253a100e7..4a3f06f02859 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -394,7 +394,7 @@ static void __init omap_hwmod_init_postsetup(void) omap_pm_if_early_init(); } -static void __init omap_common_late_init(void) +static void __init __maybe_unused omap_common_late_init(void) { omap_mux_late_init(); omap2_common_pm_late_init(); diff --git a/arch/arm/mach-omap2/omap2-restart.c b/arch/arm/mach-omap2/omap2-restart.c index 719b716a4494..68423e26399d 100644 --- a/arch/arm/mach-omap2/omap2-restart.c +++ b/arch/arm/mach-omap2/omap2-restart.c @@ -31,7 +31,7 @@ static struct clk *reset_virt_prcm_set_ck, *reset_sys_ck; * Set the DPLL to bypass so that reboot completes successfully. No * return value. */ -void omap2xxx_restart(char mode, const char *cmd) +void omap2xxx_restart(enum reboot_mode mode, const char *cmd) { u32 rate; diff --git a/arch/arm/mach-omap2/omap3-restart.c b/arch/arm/mach-omap2/omap3-restart.c index 923c582189e5..5de2a0c2979d 100644 --- a/arch/arm/mach-omap2/omap3-restart.c +++ b/arch/arm/mach-omap2/omap3-restart.c @@ -12,6 +12,7 @@ */ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/reboot.h> #include "iomap.h" #include "common.h" @@ -28,7 +29,7 @@ * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ -void omap3xxx_restart(char mode, const char *cmd) +void omap3xxx_restart(enum reboot_mode mode, const char *cmd) { omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0)); omap3xxx_prm_dpll3_reset(); /* never returns */ diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 38cd3a69cff3..57911430324e 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -23,6 +23,7 @@ #include <linux/export.h> #include <linux/irqchip/arm-gic.h> #include <linux/of_address.h> +#include <linux/reboot.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-omap2/omap4-restart.c b/arch/arm/mach-omap2/omap4-restart.c index f90e02e11898..41dfd7da8170 100644 --- a/arch/arm/mach-omap2/omap4-restart.c +++ b/arch/arm/mach-omap2/omap4-restart.c @@ -8,6 +8,7 @@ */ #include <linux/types.h> +#include <linux/reboot.h> #include "prminst44xx.h" /** @@ -18,7 +19,7 @@ * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. 
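The omap3_l3_init(), omap4_l3_init(), omap_init_vrfb() and omap_gpmc_init() hunks above collapse the open-coded IS_ERR()/PTR_ERR() test into PTR_RET(). A minimal sketch of the pattern; the device name is made up:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init mydev_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("mydev", -1, NULL, 0);

	/* PTR_RET(p) is shorthand for IS_ERR(p) ? PTR_ERR(p) : 0 */
	return PTR_RET(pdev);
}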
*/ -void omap44xx_restart(char mode, const char *cmd) +void omap44xx_restart(enum reboot_mode mode, const char *cmd) { /* XXX Should save 'cmd' into scratchpad for use after reboot */ omap4_prminst_global_warm_sw_reset(); /* never returns */ diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 68be532f8688..5cc92874be7e 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -588,11 +588,6 @@ static int _od_runtime_suspend(struct device *dev) return ret; } -static int _od_runtime_idle(struct device *dev) -{ - return pm_generic_runtime_idle(dev); -} - static int _od_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -648,7 +643,7 @@ static int _od_resume_noirq(struct device *dev) struct dev_pm_domain omap_device_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, - _od_runtime_idle) + NULL) USE_PLATFORM_PM_SLEEP_OPS .suspend_noirq = _od_suspend_noirq, .resume_noirq = _od_resume_noirq, diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c index 9ace8eae7ee8..33c8846b4193 100644 --- a/arch/arm/mach-omap2/pmu.c +++ b/arch/arm/mach-omap2/pmu.c @@ -54,10 +54,7 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[]) WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n", dev_name); - if (IS_ERR(omap_pmu_dev)) - return PTR_ERR(omap_pmu_dev); - - return 0; + return PTR_RET(omap_pmu_dev); } static int __init omap_init_pmu(void) diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index 88ff83a0942e..9086ce03ae12 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -34,6 +34,8 @@ ppa_zero_params: ppa_por_params: .word 1, 0 +#ifdef CONFIG_ARCH_OMAP4 + /* * ============================= * == CPU suspend finisher == @@ -326,7 +328,9 @@ skip_l2en: b cpu_resume @ Jump to generic resume ENDPROC(omap4_cpu_resume) -#endif +#endif /* CONFIG_ARCH_OMAP4 */ + +#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ #ifndef CONFIG_OMAP4_ERRATA_I688 ENTRY(omap_bus_sync) diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c index aee3c8940a30..7a42e1960c3b 100644 --- a/arch/arm/mach-omap2/smartreflex-class3.c +++ b/arch/arm/mach-omap2/smartreflex-class3.c @@ -26,14 +26,14 @@ static int sr_class3_enable(struct omap_sr *sr) } omap_vp_enable(sr->voltdm); - return sr_enable(sr->voltdm, volt); + return sr_enable(sr, volt); } static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) { - sr_disable_errgen(sr->voltdm); + sr_disable_errgen(sr); omap_vp_disable(sr->voltdm); - sr_disable(sr->voltdm); + sr_disable(sr); if (is_volt_reset) voltdm_reset(sr->voltdm); @@ -42,7 +42,7 @@ static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) static int sr_class3_configure(struct omap_sr *sr) { - return sr_configure_errgen(sr->voltdm); + return sr_configure_errgen(sr); } /* SR class3 structure */ diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 3bdb0fb02028..b37e1fcbad56 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -41,10 +41,10 @@ #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/platform_data/dmtimer-omap.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> #include <asm/smp_twd.h> -#include <asm/sched_clock.h> #include "omap_hwmod.h" #include "omap_device.h" @@ -220,7 +220,7 @@ static int __init omap_dm_timer_init_one(struct 
omap_dm_timer *timer, int posted) { char name[10]; /* 10 = sizeof("gptXX_Xck0") */ - const char *oh_name; + const char *oh_name = NULL; struct device_node *np; struct omap_hwmod *oh; struct resource irq, mem; diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index f8a6db9239bf..b41599f98a8e 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -347,7 +347,7 @@ void __init orion5x_init(void) orion5x_wdt_init(); } -void orion5x_restart(char mode, const char *cmd) +void orion5x_restart(enum reboot_mode mode, const char *cmd) { /* * Enable and issue soft reset diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index cdaa01f3d186..a909afb384fb 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h @@ -1,6 +1,8 @@ #ifndef __ARCH_ORION5X_COMMON_H #define __ARCH_ORION5X_COMMON_H +#include <linux/reboot.h> + struct dsa_platform_data; struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -29,7 +31,7 @@ void orion5x_spi_init(void); void orion5x_uart0_init(void); void orion5x_uart1_init(void); void orion5x_xor_init(void); -void orion5x_restart(char, const char *); +void orion5x_restart(enum reboot_mode, const char *); /* * PCIe/PCI functions. diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h index 461fd69a10ae..f727d03f1688 100644 --- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h +++ b/arch/arm/mach-orion5x/include/mach/bridge-regs.h @@ -18,7 +18,6 @@ #define CPU_CTRL (ORION5X_BRIDGE_VIRT_BASE + 0x104) #define RSTOUTn_MASK (ORION5X_BRIDGE_VIRT_BASE + 0x108) -#define WDT_RESET_OUT_EN 0x0002 #define CPU_SOFT_RESET (ORION5X_BRIDGE_VIRT_BASE + 0x10c) @@ -26,8 +25,6 @@ #define POWER_MNG_CTRL_REG (ORION5X_BRIDGE_VIRT_BASE + 0x11C) -#define WDT_INT_REQ 0x0008 - #define BRIDGE_INT_TIMER1_CLR (~0x0004) #define MAIN_IRQ_CAUSE (ORION5X_BRIDGE_VIRT_BASE + 0x200) diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c index 24f4e14e5893..6234977b5aea 100644 --- a/arch/arm/mach-orion5x/ls-chl-setup.c +++ b/arch/arm/mach-orion5x/ls-chl-setup.c @@ -139,7 +139,7 @@ static struct mv_sata_platform_data lschl_sata_data = { static void lschl_power_off(void) { - orion5x_restart('h', NULL); + orion5x_restart(REBOOT_HARD, NULL); } /***************************************************************************** diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c index fc653bb41e78..fe04c4b64569 100644 --- a/arch/arm/mach-orion5x/ls_hgl-setup.c +++ b/arch/arm/mach-orion5x/ls_hgl-setup.c @@ -185,7 +185,7 @@ static struct mv_sata_platform_data ls_hgl_sata_data = { static void ls_hgl_power_off(void) { - orion5x_restart('h', NULL); + orion5x_restart(REBOOT_HARD, NULL); } diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c index 18e66e617dc2..ca4dbe973daf 100644 --- a/arch/arm/mach-orion5x/lsmini-setup.c +++ b/arch/arm/mach-orion5x/lsmini-setup.c @@ -185,7 +185,7 @@ static struct mv_sata_platform_data lsmini_sata_data = { static void lsmini_power_off(void) { - orion5x_restart('h', NULL); + orion5x_restart(REBOOT_HARD, NULL); } diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c index b13f51bc35cf..ec79fea82704 100644 --- a/arch/arm/mach-picoxcell/common.c +++ b/arch/arm/mach-picoxcell/common.c @@ -11,6 +11,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> +#include 
<linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -63,7 +64,7 @@ static const char *picoxcell_dt_match[] = { NULL }; -static void picoxcell_wdt_restart(char mode, const char *cmd) +static void picoxcell_wdt_restart(enum reboot_mode mode, const char *cmd) { /* * Configure the watchdog to reset with the shortest possible timeout diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h index 81135cd88e54..a6304858474a 100644 --- a/arch/arm/mach-prima2/common.h +++ b/arch/arm/mach-prima2/common.h @@ -10,6 +10,8 @@ #define __MACH_PRIMA2_COMMON_H__ #include <linux/init.h> +#include <linux/reboot.h> + #include <asm/mach/time.h> #include <asm/exception.h> @@ -22,7 +24,7 @@ extern void sirfsoc_cpu_die(unsigned int cpu); extern void __init sirfsoc_of_irq_init(void); extern void __init sirfsoc_of_clk_init(void); -extern void sirfsoc_restart(char, const char *); +extern void sirfsoc_restart(enum reboot_mode, const char *); extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs); #ifndef CONFIG_DEBUG_LL diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c index d5e0cbc934c0..ccb53391147a 100644 --- a/arch/arm/mach-prima2/rstc.c +++ b/arch/arm/mach-prima2/rstc.c @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/reboot.h> void __iomem *sirfsoc_rstc_base; static DEFINE_MUTEX(rstc_lock); @@ -84,7 +85,7 @@ int sirfsoc_reset_device(struct device *dev) #define SIRFSOC_SYS_RST_BIT BIT(31) -void sirfsoc_restart(char mode, const char *cmd) +void sirfsoc_restart(enum reboot_mode mode, const char *cmd) { writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base); } diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 96100dbf5a2e..a8427115ee07 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -615,12 +615,14 @@ endmenu config PXA25x bool select CPU_XSCALE + select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA21x/25x/26x variants config PXA27x bool select CPU_XSCALE + select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA27x variants @@ -633,6 +635,7 @@ config CPU_PXA26x config PXA3xx bool select CPU_XSC3 + select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA3xx variants diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index a5b8fead7d61..f162f1b77cd2 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -663,16 +663,16 @@ static void corgi_poweroff(void) /* Green LED off tells the bootloader to halt */ gpio_set_value(CORGI_GPIO_LED_GREEN, 0); - pxa_restart('h', NULL); + pxa_restart(REBOOT_HARD, NULL); } -static void corgi_restart(char mode, const char *cmd) +static void corgi_restart(enum reboot_mode mode, const char *cmd) { if (!machine_is_corgi()) /* Green LED on tells the bootloader to reboot */ gpio_set_value(CORGI_GPIO_LED_GREEN, 1); - pxa_restart('h', cmd); + pxa_restart(REBOOT_HARD, cmd); } static void __init corgi_init(void) diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 446563a7d1ad..f6726bb4eb95 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -833,21 +833,25 @@ static inline void em_x270_init_ac97(void) {} #endif #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int em_x270_module_matrix_keys[] = { +static const unsigned int em_x270_module_matrix_keys[] = { KEY(0, 0, KEY_A), KEY(1, 0, KEY_UP), KEY(2, 1, KEY_B), KEY(0, 2, KEY_LEFT), KEY(1, 1, 
KEY_ENTER), KEY(2, 0, KEY_RIGHT), KEY(0, 1, KEY_C), KEY(1, 2, KEY_DOWN), KEY(2, 2, KEY_D), }; +static struct matrix_keymap_data em_x270_matrix_keymap_data = { + .keymap = em_x270_module_matrix_keys, + .keymap_size = ARRAY_SIZE(em_x270_module_matrix_keys), +}; + struct pxa27x_keypad_platform_data em_x270_module_keypad_info = { /* code map for the matrix keys */ .matrix_key_rows = 3, .matrix_key_cols = 3, - .matrix_key_map = em_x270_module_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(em_x270_module_matrix_keys), + .matrix_keymap_data = &em_x270_matrix_keymap_data, }; -static unsigned int em_x270_exeda_matrix_keys[] = { +static const unsigned int em_x270_exeda_matrix_keys[] = { KEY(0, 0, KEY_RIGHTSHIFT), KEY(0, 1, KEY_RIGHTCTRL), KEY(0, 2, KEY_RIGHTALT), KEY(0, 3, KEY_SPACE), KEY(0, 4, KEY_LEFTALT), KEY(0, 5, KEY_LEFTCTRL), @@ -889,12 +893,16 @@ static unsigned int em_x270_exeda_matrix_keys[] = { KEY(7, 6, 0), KEY(7, 7, 0), }; +static struct matrix_keymap_data em_x270_exeda_matrix_keymap_data = { + .keymap = em_x270_exeda_matrix_keys, + .keymap_size = ARRAY_SIZE(em_x270_exeda_matrix_keys), +}; + struct pxa27x_keypad_platform_data em_x270_exeda_keypad_info = { /* code map for the matrix keys */ .matrix_key_rows = 8, .matrix_key_cols = 8, - .matrix_key_map = em_x270_exeda_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(em_x270_exeda_matrix_keys), + .matrix_keymap_data = &em_x270_exeda_matrix_keymap_data, }; static void __init em_x270_init_keypad(void) diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index dca10709be8f..fe2eb8394dff 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -392,7 +392,7 @@ static unsigned long e6_pin_config[] __initdata = { /* KEYPAD */ #ifdef CONFIG_MACH_EZX_A780 -static unsigned int a780_key_map[] = { +static const unsigned int a780_key_map[] = { KEY(0, 0, KEY_SEND), KEY(0, 1, KEY_BACK), KEY(0, 2, KEY_END), @@ -424,11 +424,15 @@ static unsigned int a780_key_map[] = { KEY(4, 4, KEY_DOWN), }; +static struct matrix_keymap_data a780_matrix_keymap_data = { + .keymap = a780_key_map, + .keymap_size = ARRAY_SIZE(a780_key_map), +}; + static struct pxa27x_keypad_platform_data a780_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 5, - .matrix_key_map = a780_key_map, - .matrix_key_map_size = ARRAY_SIZE(a780_key_map), + .matrix_keymap_data = &a780_matrix_keymap_data, .direct_key_map = { KEY_CAMERA }, .direct_key_num = 1, @@ -438,7 +442,7 @@ static struct pxa27x_keypad_platform_data a780_keypad_platform_data = { #endif /* CONFIG_MACH_EZX_A780 */ #ifdef CONFIG_MACH_EZX_E680 -static unsigned int e680_key_map[] = { +static const unsigned int e680_key_map[] = { KEY(0, 0, KEY_UP), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_RESERVED), @@ -455,11 +459,15 @@ static unsigned int e680_key_map[] = { KEY(2, 3, KEY_KPENTER), }; +static struct matrix_keymap_data e680_matrix_keymap_data = { + .keymap = e680_key_map, + .keymap_size = ARRAY_SIZE(e680_key_map), +}; + static struct pxa27x_keypad_platform_data e680_keypad_platform_data = { .matrix_key_rows = 3, .matrix_key_cols = 4, - .matrix_key_map = e680_key_map, - .matrix_key_map_size = ARRAY_SIZE(e680_key_map), + .matrix_keymap_data = &e680_matrix_keymap_data, .direct_key_map = { KEY_CAMERA, @@ -476,7 +484,7 @@ static struct pxa27x_keypad_platform_data e680_keypad_platform_data = { #endif /* CONFIG_MACH_EZX_E680 */ #ifdef CONFIG_MACH_EZX_A1200 -static unsigned int a1200_key_map[] = { +static const unsigned int a1200_key_map[] = { KEY(0, 0, KEY_RESERVED), KEY(0, 1, KEY_RIGHT), KEY(0, 2, 
KEY_PAGEDOWN), @@ -513,18 +521,22 @@ static unsigned int a1200_key_map[] = { KEY(4, 5, KEY_RESERVED), }; +static struct matrix_keymap_data a1200_matrix_keymap_data = { + .keymap = a1200_key_map, + .keymap_size = ARRAY_SIZE(a1200_key_map), +}; + static struct pxa27x_keypad_platform_data a1200_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = a1200_key_map, - .matrix_key_map_size = ARRAY_SIZE(a1200_key_map), + .matrix_keymap_data = &a1200_matrix_keymap_data, .debounce_interval = 30, }; #endif /* CONFIG_MACH_EZX_A1200 */ #ifdef CONFIG_MACH_EZX_E6 -static unsigned int e6_key_map[] = { +static const unsigned int e6_key_map[] = { KEY(0, 0, KEY_RESERVED), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_PAGEDOWN), @@ -561,18 +573,22 @@ static unsigned int e6_key_map[] = { KEY(4, 5, KEY_PREVIOUSSONG), }; +static struct matrix_keymap_data e6_keymap_data = { + .keymap = e6_key_map, + .keymap_size = ARRAY_SIZE(e6_key_map), +}; + static struct pxa27x_keypad_platform_data e6_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = e6_key_map, - .matrix_key_map_size = ARRAY_SIZE(e6_key_map), + .matrix_keymap_data = &e6_keymap_data, .debounce_interval = 30, }; #endif /* CONFIG_MACH_EZX_E6 */ #ifdef CONFIG_MACH_EZX_A910 -static unsigned int a910_key_map[] = { +static const unsigned int a910_key_map[] = { KEY(0, 0, KEY_NUMERIC_6), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_PAGEDOWN), @@ -609,18 +625,22 @@ static unsigned int a910_key_map[] = { KEY(4, 5, KEY_RESERVED), }; +static struct matrix_keymap_data a910_matrix_keymap_data = { + .keymap = a910_key_map, + .keymap_size = ARRAY_SIZE(a910_key_map), +}; + static struct pxa27x_keypad_platform_data a910_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = a910_key_map, - .matrix_key_map_size = ARRAY_SIZE(a910_key_map), + .matrix_keymap_data = &a910_matrix_keymap_data, .debounce_interval = 30, }; #endif /* CONFIG_MACH_EZX_A910 */ #ifdef CONFIG_MACH_EZX_E2 -static unsigned int e2_key_map[] = { +static const unsigned int e2_key_map[] = { KEY(0, 0, KEY_NUMERIC_6), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_NUMERIC_9), @@ -657,11 +677,15 @@ static unsigned int e2_key_map[] = { KEY(4, 5, KEY_RESERVED), }; +static struct matrix_keymap_data e2_matrix_keymap_data = { + .keymap = e2_key_map, + .keymap_size = ARRAY_SIZE(e2_key_map), +}; + static struct pxa27x_keypad_platform_data e2_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = e2_key_map, - .matrix_key_map_size = ARRAY_SIZE(e2_key_map), + .matrix_keymap_data = &e2_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h index fd7ea39b78c0..8963984d1f43 100644 --- a/arch/arm/mach-pxa/generic.h +++ b/arch/arm/mach-pxa/generic.h @@ -9,6 +9,8 @@ * published by the Free Software Foundation. 
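The pxa27x keypad hunks above (teton_bga.c, em-x270.c and the ezx.c boards) stop passing a bare matrix_key_map array plus its size and instead point the platform data at a struct matrix_keymap_data. A sketch of the new shape with a made-up 2x2 keymap; the keypad-pxa27x.h path is an assumption about where the platform data struct lives in this tree:

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/platform_data/keypad-pxa27x.h>	/* assumed header path */

static const unsigned int myboard_matrix_keys[] = {
	KEY(0, 0, KEY_UP),    KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER), KEY(1, 1, KEY_BACK),
};

static struct matrix_keymap_data myboard_keymap_data = {
	.keymap		= myboard_matrix_keys,
	.keymap_size	= ARRAY_SIZE(myboard_matrix_keys),
};

static struct pxa27x_keypad_platform_data myboard_keypad_info = {
	.matrix_key_rows	= 2,
	.matrix_key_cols	= 2,
	.matrix_keymap_data	= &myboard_keymap_data,	/* was .matrix_key_map / _size */
	.debounce_interval	= 30,
};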
*/ +#include <linux/reboot.h> + struct irq_data; extern void pxa_timer_init(void); @@ -56,4 +58,4 @@ void __init pxa_set_btuart_info(void *info); void __init pxa_set_stuart_info(void *info); void __init pxa_set_hwuart_info(void *info); -void pxa_restart(char, const char *); +void pxa_restart(enum reboot_mode, const char *); diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index e848c4607baf..5d665588c7eb 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -222,7 +222,7 @@ static inline void littleton_init_spi(void) {} #endif #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int littleton_matrix_key_map[] = { +static const unsigned int littleton_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3), KEY(0, 1, KEY_4), KEY(1, 1, KEY_5), KEY(2, 1, KEY_6), KEY(0, 2, KEY_7), @@ -249,11 +249,15 @@ static unsigned int littleton_matrix_key_map[] = { KEY(3, 1, KEY_F23), /* soft2 */ }; +static struct matrix_keymap_data littleton_matrix_keymap_data = { + .keymap = littleton_matrix_key_map, + .keymap_size = ARRAY_SIZE(littleton_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data littleton_keypad_info = { .matrix_key_rows = 6, .matrix_key_cols = 5, - .matrix_key_map = littleton_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(littleton_matrix_key_map), + .matrix_keymap_data = &littleton_matrix_keymap_data, .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 7a12c1ba90ff..d2c652318376 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -498,7 +498,7 @@ static struct pxaohci_platform_data mainstone_ohci_platform_data = { }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int mainstone_matrix_keys[] = { +static const unsigned int mainstone_matrix_keys[] = { KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), KEY(0, 1, KEY_G), KEY(1, 1, KEY_H), KEY(2, 1, KEY_I), @@ -527,11 +527,15 @@ static unsigned int mainstone_matrix_keys[] = { KEY(4, 6, KEY_SELECT), }; +static struct matrix_keymap_data mainstone_matrix_keymap_data = { + .keymap = mainstone_matrix_keys, + .keymap_size = ARRAY_SIZE(mainstone_matrix_keys), +}; + struct pxa27x_keypad_platform_data mainstone_keypad_info = { .matrix_key_rows = 6, .matrix_key_cols = 7, - .matrix_key_map = mainstone_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(mainstone_matrix_keys), + .matrix_keymap_data = &mainstone_matrix_keymap_data, .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index f8979b943cbf..acc9d3cc0762 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -37,6 +37,7 @@ #include <linux/wm97xx.h> #include <linux/mtd/physmap.h> #include <linux/usb/gpio_vbus.h> +#include <linux/reboot.h> #include <linux/regulator/max1586.h> #include <linux/slab.h> #include <linux/i2c/pxa-i2c.h> @@ -222,7 +223,7 @@ static struct pxafb_mach_info mioa701_pxafb_info = { /* * Keyboard configuration */ -static unsigned int mioa701_matrix_keys[] = { +static const unsigned int mioa701_matrix_keys[] = { KEY(0, 0, KEY_UP), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_MEDIA), @@ -233,11 +234,16 @@ static unsigned int mioa701_matrix_keys[] = { KEY(2, 1, KEY_PHONE), /* Phone Green key */ KEY(2, 2, KEY_CAMERA) /* Camera 
key */ }; + +static struct matrix_keymap_data mioa701_matrix_keymap_data = { + .keymap = mioa701_matrix_keys, + .keymap_size = ARRAY_SIZE(mioa701_matrix_keys), +}; + static struct pxa27x_keypad_platform_data mioa701_keypad_info = { .matrix_key_rows = 3, .matrix_key_cols = 3, - .matrix_key_map = mioa701_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(mioa701_matrix_keys), + .matrix_keymap_data = &mioa701_matrix_keymap_data, }; /* @@ -691,13 +697,13 @@ static void mioa701_machine_exit(void); static void mioa701_poweroff(void) { mioa701_machine_exit(); - pxa_restart('s', NULL); + pxa_restart(REBOOT_SOFT, NULL); } -static void mioa701_restart(char c, const char *cmd) +static void mioa701_restart(enum reboot_mode c, const char *cmd) { mioa701_machine_exit(); - pxa_restart('s', cmd); + pxa_restart(REBOOT_SOFT, cmd); } static struct gpio global_gpios[] = { @@ -756,7 +762,6 @@ static void mioa701_machine_exit(void) MACHINE_START(MIOA701, "MIO A701") .atag_offset = 0x100, - .restart_mode = 's', .map_io = &pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = &pxa27x_init_irq, diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index 909b713e5789..cf210b11ffcc 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c @@ -173,7 +173,7 @@ static inline void palmld_nor_init(void) {} * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmld_matrix_keys[] = { +static const unsigned int palmld_matrix_keys[] = { KEY(0, 1, KEY_F2), KEY(0, 2, KEY_UP), @@ -190,11 +190,15 @@ static unsigned int palmld_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data palmld_matrix_keymap_data = { + .keymap = palmld_matrix_keys, + .keymap_size = ARRAY_SIZE(palmld_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmld_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmld_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmld_matrix_keys), + .matrix_keymap_data = &palmld_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c index 5033fd07968f..3ed9b029428b 100644 --- a/arch/arm/mach-pxa/palmt5.c +++ b/arch/arm/mach-pxa/palmt5.c @@ -108,7 +108,7 @@ static unsigned long palmt5_pin_config[] __initdata = { * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmt5_matrix_keys[] = { +static const unsigned int palmt5_matrix_keys[] = { KEY(0, 0, KEY_POWER), KEY(0, 1, KEY_F1), KEY(0, 2, KEY_ENTER), @@ -124,11 +124,15 @@ static unsigned int palmt5_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data palmt5_matrix_keymap_data = { + .keymap = palmt5_matrix_keys, + .keymap_size = ARRAY_SIZE(palmt5_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmt5_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmt5_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmt5_matrix_keys), + .matrix_keymap_data = &palmt5_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c index d82a50b4a803..d8b937c870de 100644 --- a/arch/arm/mach-pxa/palmtreo.c +++ b/arch/arm/mach-pxa/palmtreo.c @@ -168,7 +168,7 @@ static unsigned long centro685_pin_config[] __initdata = { * 
GPIO keyboard ******************************************************************************/ #if IS_ENABLED(CONFIG_KEYBOARD_PXA27x) -static unsigned int treo680_matrix_keys[] = { +static const unsigned int treo680_matrix_keys[] = { KEY(0, 0, KEY_F8), /* Red/Off/Power */ KEY(0, 1, KEY_LEFT), KEY(0, 2, KEY_LEFTCTRL), /* Alternate */ @@ -227,7 +227,7 @@ static unsigned int treo680_matrix_keys[] = { KEY(7, 5, KEY_I), }; -static unsigned int centro_matrix_keys[] = { +static const unsigned int centro_matrix_keys[] = { KEY(0, 0, KEY_F9), /* Home */ KEY(0, 1, KEY_LEFT), KEY(0, 2, KEY_LEFTCTRL), /* Alternate */ @@ -286,11 +286,20 @@ static unsigned int centro_matrix_keys[] = { KEY(7, 5, KEY_I), }; +static struct matrix_keymap_data treo680_matrix_keymap_data = { + .keymap = treo680_matrix_keys, + .keymap_size = ARRAY_SIZE(treo680_matrix_keys), +}; + +static struct matrix_keymap_data centro_matrix_keymap_data = { + .keymap = centro_matrix_keys, + .keymap_size = ARRAY_SIZE(centro_matrix_keys), +}; + static struct pxa27x_keypad_platform_data treo680_keypad_pdata = { .matrix_key_rows = 8, .matrix_key_cols = 7, - .matrix_key_map = treo680_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(treo680_matrix_keys), + .matrix_keymap_data = &treo680_matrix_keymap_data, .direct_key_map = { KEY_CONNECT }, .direct_key_num = 1, @@ -301,10 +310,8 @@ static void __init palmtreo_kpc_init(void) { static struct pxa27x_keypad_platform_data *data = &treo680_keypad_pdata; - if (machine_is_centro()) { - data->matrix_key_map = centro_matrix_keys; - data->matrix_key_map_size = ARRAY_SIZE(centro_matrix_keys); - } + if (machine_is_centro()) + data->matrix_keymap_data = ¢ro_matrix_keymap_data; pxa_set_keypad_info(&treo680_keypad_pdata); } diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index 627c93a7364c..83f830dd8ad8 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c @@ -176,7 +176,7 @@ static inline void palmtx_nor_init(void) {} * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmtx_matrix_keys[] = { +static const unsigned int palmtx_matrix_keys[] = { KEY(0, 0, KEY_POWER), KEY(0, 1, KEY_F1), KEY(0, 2, KEY_ENTER), @@ -192,11 +192,15 @@ static unsigned int palmtx_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data palmtx_matrix_keymap_data = { + .keymap = palmtx_matrix_keys, + .keymap_size = ARRAY_SIZE(palmtx_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmtx_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmtx_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmtx_matrix_keys), + .matrix_keymap_data = &palmtx_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 18b7fcd98592..1a35ddf218da 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -140,7 +140,7 @@ static unsigned long palmz72_pin_config[] __initdata = { * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmz72_matrix_keys[] = { +static const unsigned int palmz72_matrix_keys[] = { KEY(0, 0, KEY_POWER), KEY(0, 1, KEY_F1), KEY(0, 2, KEY_ENTER), @@ -156,11 +156,15 @@ static unsigned int palmz72_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct 
matrix_keymap_data almz72_matrix_keymap_data = { + .keymap = palmz72_matrix_keys, + .keymap_size = ARRAY_SIZE(palmz72_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmz72_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmz72_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmz72_matrix_keys), + .matrix_keymap_data = &almz72_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index 50ccd5f1d560..711d37e26bd8 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -422,7 +422,7 @@ static struct i2c_board_info __initdata poodle_i2c_devices[] = { static void poodle_poweroff(void) { - pxa_restart('h', NULL); + pxa_restart(REBOOT_HARD, NULL); } static void __init poodle_init(void) diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 3fab583755d4..0d5dd646f61f 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -83,7 +83,7 @@ static void do_hw_reset(void) writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); } -void pxa_restart(char mode, const char *cmd) +void pxa_restart(enum reboot_mode mode, const char *cmd) { local_irq_disable(); local_fiq_disable(); @@ -91,14 +91,14 @@ void pxa_restart(char mode, const char *cmd) clear_reset_status(RESET_STATUS_ALL); switch (mode) { - case 's': + case REBOOT_SOFT: /* Jump into ROM at address 0 */ soft_restart(0); break; - case 'g': + case REBOOT_GPIO: do_gpio_reset(); break; - case 'h': + case REBOOT_HARD: default: do_hw_reset(); break; diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 362726c49c70..2125df0444e7 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -31,6 +31,7 @@ #include <linux/regulator/machine.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/reboot.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -924,10 +925,10 @@ static inline void spitz_i2c_init(void) {} ******************************************************************************/ static void spitz_poweroff(void) { - pxa_restart('g', NULL); + pxa_restart(REBOOT_GPIO, NULL); } -static void spitz_restart(char mode, const char *cmd) +static void spitz_restart(enum reboot_mode mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); /* Bootloader magic for a reboot */ @@ -979,7 +980,6 @@ static void __init spitz_fixup(struct tag *tags, char **cmdline, #ifdef CONFIG_MACH_SPITZ MACHINE_START(SPITZ, "SHARP Spitz") - .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, @@ -993,7 +993,6 @@ MACHINE_END #ifdef CONFIG_MACH_BORZOI MACHINE_START(BORZOI, "SHARP Borzoi") - .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, @@ -1007,7 +1006,6 @@ MACHINE_END #ifdef CONFIG_MACH_AKITA MACHINE_START(AKITA, "SHARP Akita") - .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c index f55979c09a5f..4680efe55345 100644 --- a/arch/arm/mach-pxa/tavorevb.c +++ b/arch/arm/mach-pxa/tavorevb.c @@ -106,7 +106,7 @@ static struct platform_device smc91x_device = { }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int tavorevb_matrix_key_map[] = { +static const unsigned int tavorevb_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(0, 4, KEY_A), KEY(0, 5, KEY_B), KEY(0, 6, KEY_C), KEY(1, 4, KEY_E), KEY(1, 5, KEY_F), KEY(1, 
6, KEY_G), @@ -147,11 +147,15 @@ static unsigned int tavorevb_matrix_key_map[] = { KEY(3, 3, KEY_F23), /* soft2 */ }; +static struct matrix_keymap_data tavorevb_matrix_keymap_data = { + .keymap = tavorevb_matrix_key_map, + .keymap_size = ARRAY_SIZE(tavorevb_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data tavorevb_keypad_info = { .matrix_key_rows = 7, .matrix_key_cols = 7, - .matrix_key_map = tavorevb_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(tavorevb_matrix_key_map), + .matrix_keymap_data = &tavorevb_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 8f1ee92aea30..9aa852a8fab9 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -16,11 +16,11 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/clockchips.h> +#include <linux/sched_clock.h> #include <asm/div64.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> #include <mach/regs-ost.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 3d91d2e5bf3a..0206b915a6f6 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -36,6 +36,7 @@ #include <linux/input/matrix_keypad.h> #include <linux/i2c/pxa-i2c.h> #include <linux/usb/gpio_vbus.h> +#include <linux/reboot.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -911,10 +912,10 @@ static struct platform_device *devices[] __initdata = { static void tosa_poweroff(void) { - pxa_restart('g', NULL); + pxa_restart(REBOOT_GPIO, NULL); } -static void tosa_restart(char mode, const char *cmd) +static void tosa_restart(enum reboot_mode mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); @@ -969,7 +970,6 @@ static void __init fixup_tosa(struct tag *tags, char **cmdline, } MACHINE_START(TOSA, "SHARP Tosa") - .restart_mode = 'g', .fixup = fixup_tosa, .map_io = pxa25x_map_io, .nr_irqs = TOSA_NR_IRQS, diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index 989903a7e467..2513d8f4931f 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -345,7 +345,7 @@ static inline void z2_leds_init(void) {} * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int z2_matrix_keys[] = { +static const unsigned int z2_matrix_keys[] = { KEY(0, 0, KEY_OPTION), KEY(1, 0, KEY_UP), KEY(2, 0, KEY_DOWN), @@ -405,11 +405,15 @@ static unsigned int z2_matrix_keys[] = { KEY(5, 7, KEY_DOT), }; +static struct matrix_keymap_data z2_matrix_keymap_data = { + .keymap = z2_matrix_keys, + .keymap_size = ARRAY_SIZE(z2_matrix_keys), +}; + static struct pxa27x_keypad_platform_data z2_keypad_platform_data = { .matrix_key_rows = 7, .matrix_key_cols = 8, - .matrix_key_map = z2_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(z2_matrix_keys), + .matrix_keymap_data = &z2_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c index 1f00d650ac27..36cf7cf95ec1 100644 --- a/arch/arm/mach-pxa/zylonite.c +++ b/arch/arm/mach-pxa/zylonite.c @@ -263,7 +263,7 @@ static inline void zylonite_init_mmc(void) {} #endif #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int zylonite_matrix_key_map[] = { +static const unsigned int zylonite_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_C), KEY(0, 5, KEY_D), KEY(1, 0, 
KEY_E), KEY(1, 1, KEY_F), KEY(1, 2, KEY_G), KEY(1, 5, KEY_H), @@ -306,11 +306,15 @@ static unsigned int zylonite_matrix_key_map[] = { KEY(0, 3, KEY_AUX), /* contact */ }; +static struct matrix_keymap_data zylonite_matrix_keymap_data = { + .keymap = zylonite_matrix_key_map, + .keymap_size = ARRAY_SIZE(zylonite_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data zylonite_keypad_info = { .matrix_key_rows = 8, .matrix_key_cols = 8, - .matrix_key_map = zylonite_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(zylonite_matrix_key_map), + .matrix_keymap_data = &zylonite_matrix_keymap_data, .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 5b1c8bfe6fa9..c85ddb2a0ad0 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -418,7 +419,7 @@ static void __init realview_eb_timer_init(void) realview_eb_twd_init(); } -static void realview_eb_restart(char mode, const char *cmd) +static void realview_eb_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index d5e83a1f6982..c5eade76461b 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c @@ -31,6 +31,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -329,7 +330,7 @@ static void __init realview_pb1176_timer_init(void) realview_timer_init(IRQ_DC1176_TIMER0); } -static void realview_pb1176_restart(char mode, const char *cmd) +static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index c3cfe213b5e6..f4b0962578fe 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -316,7 +317,7 @@ static void __init realview_pb11mp_timer_init(void) realview_pb11mp_twd_init(); } -static void realview_pb11mp_restart(char mode, const char *cmd) +static void realview_pb11mp_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index dde652a59620..10a3e1d76891 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -264,7 +265,7 @@ static void __init realview_pba8_timer_init(void) realview_timer_init(IRQ_PBA8_TIMER0_1); } 
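On the caller side, the series replaces the old single-character reboot codes with REBOOT_* constants and drops .restart_mode from the machine descriptors (mioa701, spitz and tosa above); pxa_restart() itself now switches on REBOOT_SOFT, REBOOT_GPIO and REBOOT_HARD. A sketch of a board's hooks after the change; the board name is hypothetical and pxa_restart() is the mach-pxa helper declared in generic.h as updated above:

#include <linux/reboot.h>

static void myboard_poweroff(void)
{
	pxa_restart(REBOOT_GPIO, NULL);		/* was pxa_restart('g', NULL) */
}

static void myboard_restart(enum reboot_mode mode, const char *cmd)
{
	/* board-specific shutdown quirks would go here */
	pxa_restart(REBOOT_SOFT, cmd);		/* was pxa_restart('s', cmd) */
}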
-static void realview_pba8_restart(char mode, const char *cmd) +static void realview_pba8_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 54f0185b01e3..9d75493e3f0c 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -344,7 +345,7 @@ static void realview_pbx_fixup(struct tag *tags, char **from, #endif } -static void realview_pbx_restart(char mode, const char *cmd) +static void realview_pbx_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index a302cf5e0fc7..09d602b10d57 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -20,6 +20,7 @@ #include <linux/ata_platform.h> #include <linux/io.h> #include <linux/i2c.h> +#include <linux/reboot.h> #include <asm/elf.h> #include <asm/mach-types.h> @@ -201,7 +202,7 @@ static int __init rpc_init(void) arch_initcall(rpc_init); -static void rpc_restart(char mode, const char *cmd) +static void rpc_restart(enum reboot_mode mode, const char *cmd) { iomd_writeb(0, IOMD_ROMCR0); diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h index 307c3714be55..84b280654f4c 100644 --- a/arch/arm/mach-s3c24xx/common.h +++ b/arch/arm/mach-s3c24xx/common.h @@ -12,6 +12,8 @@ #ifndef __ARCH_ARM_MACH_S3C24XX_COMMON_H #define __ARCH_ARM_MACH_S3C24XX_COMMON_H __FILE__ +#include <linux/reboot.h> + struct s3c2410_uartcfg; #ifdef CONFIG_CPU_S3C2410 @@ -20,7 +22,7 @@ extern int s3c2410a_init(void); extern void s3c2410_map_io(void); extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2410_init_clocks(int xtal); -extern void s3c2410_restart(char mode, const char *cmd); +extern void s3c2410_restart(enum reboot_mode mode, const char *cmd); extern void s3c2410_init_irq(void); #else #define s3c2410_init_clocks NULL @@ -36,7 +38,7 @@ extern void s3c2412_map_io(void); extern void s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2412_init_clocks(int xtal); extern int s3c2412_baseclk_add(void); -extern void s3c2412_restart(char mode, const char *cmd); +extern void s3c2412_restart(enum reboot_mode mode, const char *cmd); extern void s3c2412_init_irq(void); #else #define s3c2412_init_clocks NULL @@ -51,7 +53,7 @@ extern void s3c2416_map_io(void); extern void s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2416_init_clocks(int xtal); extern int s3c2416_baseclk_add(void); -extern void s3c2416_restart(char mode, const char *cmd); +extern void s3c2416_restart(enum reboot_mode mode, const char *cmd); extern void s3c2416_init_irq(void); extern struct syscore_ops s3c2416_irq_syscore_ops; @@ -66,7 +68,7 @@ extern struct syscore_ops s3c2416_irq_syscore_ops; extern void s3c244x_map_io(void); extern void s3c244x_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c244x_init_clocks(int xtal); -extern void s3c244x_restart(char mode, const char *cmd); +extern void s3c244x_restart(enum reboot_mode mode, const char *cmd); #else 
#define s3c244x_init_clocks NULL #define s3c244x_init_uarts NULL @@ -96,7 +98,7 @@ extern void s3c2443_map_io(void); extern void s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2443_init_clocks(int xtal); extern int s3c2443_baseclk_add(void); -extern void s3c2443_restart(char mode, const char *cmd); +extern void s3c2443_restart(enum reboot_mode mode, const char *cmd); extern void s3c2443_init_irq(void); #else #define s3c2443_init_clocks NULL diff --git a/arch/arm/mach-s3c24xx/cpufreq-utils.c b/arch/arm/mach-s3c24xx/cpufreq-utils.c index ddd8280e3875..2a0aa5684e72 100644 --- a/arch/arm/mach-s3c24xx/cpufreq-utils.c +++ b/arch/arm/mach-s3c24xx/cpufreq-utils.c @@ -60,5 +60,5 @@ void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) */ void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg) { - __raw_writel(cfg->pll.index, S3C2410_MPLLCON); + __raw_writel(cfg->pll.driver_data, S3C2410_MPLLCON); } diff --git a/arch/arm/mach-s3c24xx/pll-s3c2410.c b/arch/arm/mach-s3c24xx/pll-s3c2410.c index dcf3420a3271..5e37d368594b 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2410.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2410.c @@ -33,36 +33,36 @@ #include <plat/cpu-freq-core.h> static struct cpufreq_frequency_table pll_vals_12MHz[] = { - { .frequency = 34000000, .index = PLLVAL(82, 2, 3), }, - { .frequency = 45000000, .index = PLLVAL(82, 1, 3), }, - { .frequency = 51000000, .index = PLLVAL(161, 3, 3), }, - { .frequency = 48000000, .index = PLLVAL(120, 2, 3), }, - { .frequency = 56000000, .index = PLLVAL(142, 2, 3), }, - { .frequency = 68000000, .index = PLLVAL(82, 2, 2), }, - { .frequency = 79000000, .index = PLLVAL(71, 1, 2), }, - { .frequency = 85000000, .index = PLLVAL(105, 2, 2), }, - { .frequency = 90000000, .index = PLLVAL(112, 2, 2), }, - { .frequency = 101000000, .index = PLLVAL(127, 2, 2), }, - { .frequency = 113000000, .index = PLLVAL(105, 1, 2), }, - { .frequency = 118000000, .index = PLLVAL(150, 2, 2), }, - { .frequency = 124000000, .index = PLLVAL(116, 1, 2), }, - { .frequency = 135000000, .index = PLLVAL(82, 2, 1), }, - { .frequency = 147000000, .index = PLLVAL(90, 2, 1), }, - { .frequency = 152000000, .index = PLLVAL(68, 1, 1), }, - { .frequency = 158000000, .index = PLLVAL(71, 1, 1), }, - { .frequency = 170000000, .index = PLLVAL(77, 1, 1), }, - { .frequency = 180000000, .index = PLLVAL(82, 1, 1), }, - { .frequency = 186000000, .index = PLLVAL(85, 1, 1), }, - { .frequency = 192000000, .index = PLLVAL(88, 1, 1), }, - { .frequency = 203000000, .index = PLLVAL(161, 3, 1), }, + { .frequency = 34000000, .driver_data = PLLVAL(82, 2, 3), }, + { .frequency = 45000000, .driver_data = PLLVAL(82, 1, 3), }, + { .frequency = 51000000, .driver_data = PLLVAL(161, 3, 3), }, + { .frequency = 48000000, .driver_data = PLLVAL(120, 2, 3), }, + { .frequency = 56000000, .driver_data = PLLVAL(142, 2, 3), }, + { .frequency = 68000000, .driver_data = PLLVAL(82, 2, 2), }, + { .frequency = 79000000, .driver_data = PLLVAL(71, 1, 2), }, + { .frequency = 85000000, .driver_data = PLLVAL(105, 2, 2), }, + { .frequency = 90000000, .driver_data = PLLVAL(112, 2, 2), }, + { .frequency = 101000000, .driver_data = PLLVAL(127, 2, 2), }, + { .frequency = 113000000, .driver_data = PLLVAL(105, 1, 2), }, + { .frequency = 118000000, .driver_data = PLLVAL(150, 2, 2), }, + { .frequency = 124000000, .driver_data = PLLVAL(116, 1, 2), }, + { .frequency = 135000000, .driver_data = PLLVAL(82, 2, 1), }, + { .frequency = 147000000, .driver_data = PLLVAL(90, 2, 1), }, + { .frequency = 152000000, .driver_data = PLLVAL(68, 1, 
1), }, + { .frequency = 158000000, .driver_data = PLLVAL(71, 1, 1), }, + { .frequency = 170000000, .driver_data = PLLVAL(77, 1, 1), }, + { .frequency = 180000000, .driver_data = PLLVAL(82, 1, 1), }, + { .frequency = 186000000, .driver_data = PLLVAL(85, 1, 1), }, + { .frequency = 192000000, .driver_data = PLLVAL(88, 1, 1), }, + { .frequency = 203000000, .driver_data = PLLVAL(161, 3, 1), }, /* 2410A extras */ - { .frequency = 210000000, .index = PLLVAL(132, 2, 1), }, - { .frequency = 226000000, .index = PLLVAL(105, 1, 1), }, - { .frequency = 266000000, .index = PLLVAL(125, 1, 1), }, - { .frequency = 268000000, .index = PLLVAL(126, 1, 1), }, - { .frequency = 270000000, .index = PLLVAL(127, 1, 1), }, + { .frequency = 210000000, .driver_data = PLLVAL(132, 2, 1), }, + { .frequency = 226000000, .driver_data = PLLVAL(105, 1, 1), }, + { .frequency = 266000000, .driver_data = PLLVAL(125, 1, 1), }, + { .frequency = 268000000, .driver_data = PLLVAL(126, 1, 1), }, + { .frequency = 270000000, .driver_data = PLLVAL(127, 1, 1), }, }; static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif) diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c index 673781758319..a19460e6e7b0 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c @@ -21,33 +21,33 @@ #include <plat/cpu-freq-core.h> static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = { - { .frequency = 75000000, .index = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ - { .frequency = 80000000, .index = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ - { .frequency = 90000000, .index = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ - { .frequency = 100000000, .index = PLLVAL(0x5c, 1, 3), }, /* FVco 800.000000 */ - { .frequency = 110000000, .index = PLLVAL(0x66, 1, 3), }, /* FVco 880.000000 */ - { .frequency = 120000000, .index = PLLVAL(0x70, 1, 3), }, /* FVco 960.000000 */ - { .frequency = 150000000, .index = PLLVAL(0x75, 3, 2), }, /* FVco 600.000000 */ - { .frequency = 160000000, .index = PLLVAL(0x98, 4, 2), }, /* FVco 640.000000 */ - { .frequency = 170000000, .index = PLLVAL(0x4d, 1, 2), }, /* FVco 680.000000 */ - { .frequency = 180000000, .index = PLLVAL(0x70, 2, 2), }, /* FVco 720.000000 */ - { .frequency = 190000000, .index = PLLVAL(0x57, 1, 2), }, /* FVco 760.000000 */ - { .frequency = 200000000, .index = PLLVAL(0x5c, 1, 2), }, /* FVco 800.000000 */ - { .frequency = 210000000, .index = PLLVAL(0x84, 2, 2), }, /* FVco 840.000000 */ - { .frequency = 220000000, .index = PLLVAL(0x66, 1, 2), }, /* FVco 880.000000 */ - { .frequency = 230000000, .index = PLLVAL(0x6b, 1, 2), }, /* FVco 920.000000 */ - { .frequency = 240000000, .index = PLLVAL(0x70, 1, 2), }, /* FVco 960.000000 */ - { .frequency = 300000000, .index = PLLVAL(0x75, 3, 1), }, /* FVco 600.000000 */ - { .frequency = 310000000, .index = PLLVAL(0x93, 4, 1), }, /* FVco 620.000000 */ - { .frequency = 320000000, .index = PLLVAL(0x98, 4, 1), }, /* FVco 640.000000 */ - { .frequency = 330000000, .index = PLLVAL(0x66, 2, 1), }, /* FVco 660.000000 */ - { .frequency = 340000000, .index = PLLVAL(0x4d, 1, 1), }, /* FVco 680.000000 */ - { .frequency = 350000000, .index = PLLVAL(0xa7, 4, 1), }, /* FVco 700.000000 */ - { .frequency = 360000000, .index = PLLVAL(0x70, 2, 1), }, /* FVco 720.000000 */ - { .frequency = 370000000, .index = PLLVAL(0xb1, 4, 1), }, /* FVco 740.000000 */ - { .frequency = 380000000, .index = PLLVAL(0x57, 1, 1), }, /* FVco 760.000000 */ - { .frequency = 
390000000, .index = PLLVAL(0x7a, 2, 1), }, /* FVco 780.000000 */ - { .frequency = 400000000, .index = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ + { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ + { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ + { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ + { .frequency = 100000000, .driver_data = PLLVAL(0x5c, 1, 3), }, /* FVco 800.000000 */ + { .frequency = 110000000, .driver_data = PLLVAL(0x66, 1, 3), }, /* FVco 880.000000 */ + { .frequency = 120000000, .driver_data = PLLVAL(0x70, 1, 3), }, /* FVco 960.000000 */ + { .frequency = 150000000, .driver_data = PLLVAL(0x75, 3, 2), }, /* FVco 600.000000 */ + { .frequency = 160000000, .driver_data = PLLVAL(0x98, 4, 2), }, /* FVco 640.000000 */ + { .frequency = 170000000, .driver_data = PLLVAL(0x4d, 1, 2), }, /* FVco 680.000000 */ + { .frequency = 180000000, .driver_data = PLLVAL(0x70, 2, 2), }, /* FVco 720.000000 */ + { .frequency = 190000000, .driver_data = PLLVAL(0x57, 1, 2), }, /* FVco 760.000000 */ + { .frequency = 200000000, .driver_data = PLLVAL(0x5c, 1, 2), }, /* FVco 800.000000 */ + { .frequency = 210000000, .driver_data = PLLVAL(0x84, 2, 2), }, /* FVco 840.000000 */ + { .frequency = 220000000, .driver_data = PLLVAL(0x66, 1, 2), }, /* FVco 880.000000 */ + { .frequency = 230000000, .driver_data = PLLVAL(0x6b, 1, 2), }, /* FVco 920.000000 */ + { .frequency = 240000000, .driver_data = PLLVAL(0x70, 1, 2), }, /* FVco 960.000000 */ + { .frequency = 300000000, .driver_data = PLLVAL(0x75, 3, 1), }, /* FVco 600.000000 */ + { .frequency = 310000000, .driver_data = PLLVAL(0x93, 4, 1), }, /* FVco 620.000000 */ + { .frequency = 320000000, .driver_data = PLLVAL(0x98, 4, 1), }, /* FVco 640.000000 */ + { .frequency = 330000000, .driver_data = PLLVAL(0x66, 2, 1), }, /* FVco 660.000000 */ + { .frequency = 340000000, .driver_data = PLLVAL(0x4d, 1, 1), }, /* FVco 680.000000 */ + { .frequency = 350000000, .driver_data = PLLVAL(0xa7, 4, 1), }, /* FVco 700.000000 */ + { .frequency = 360000000, .driver_data = PLLVAL(0x70, 2, 1), }, /* FVco 720.000000 */ + { .frequency = 370000000, .driver_data = PLLVAL(0xb1, 4, 1), }, /* FVco 740.000000 */ + { .frequency = 380000000, .driver_data = PLLVAL(0x57, 1, 1), }, /* FVco 760.000000 */ + { .frequency = 390000000, .driver_data = PLLVAL(0x7a, 2, 1), }, /* FVco 780.000000 */ + { .frequency = 400000000, .driver_data = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ }; static int s3c2440_plls12_add(struct device *dev, struct subsys_interface *sif) diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c index debfa106289b..1191b2905625 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c @@ -21,61 +21,61 @@ #include <plat/cpu-freq-core.h> static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = { - { .frequency = 78019200, .index = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ - { .frequency = 84067200, .index = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ - { .frequency = 90115200, .index = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ - { .frequency = 96163200, .index = PLLVAL(151, 5, 3), }, /* FVco 769.305600 */ - { .frequency = 102135600, .index = PLLVAL(185, 6, 3), }, /* FVco 817.084800 */ - { .frequency = 108259200, .index = PLLVAL(171, 5, 3), }, /* FVco 866.073600 */ - { .frequency = 114307200, .index = PLLVAL(127, 3, 3), }, /* FVco 914.457600 */ - { .frequency 
= 120234240, .index = PLLVAL(134, 3, 3), }, /* FVco 961.873920 */ - { .frequency = 126161280, .index = PLLVAL(141, 3, 3), }, /* FVco 1009.290240 */ - { .frequency = 132088320, .index = PLLVAL(148, 3, 3), }, /* FVco 1056.706560 */ - { .frequency = 138015360, .index = PLLVAL(155, 3, 3), }, /* FVco 1104.122880 */ - { .frequency = 144789120, .index = PLLVAL(163, 3, 3), }, /* FVco 1158.312960 */ - { .frequency = 150100363, .index = PLLVAL(187, 9, 2), }, /* FVco 600.401454 */ - { .frequency = 156038400, .index = PLLVAL(121, 5, 2), }, /* FVco 624.153600 */ - { .frequency = 162086400, .index = PLLVAL(126, 5, 2), }, /* FVco 648.345600 */ - { .frequency = 168134400, .index = PLLVAL(131, 5, 2), }, /* FVco 672.537600 */ - { .frequency = 174048000, .index = PLLVAL(177, 7, 2), }, /* FVco 696.192000 */ - { .frequency = 180230400, .index = PLLVAL(141, 5, 2), }, /* FVco 720.921600 */ - { .frequency = 186278400, .index = PLLVAL(124, 4, 2), }, /* FVco 745.113600 */ - { .frequency = 192326400, .index = PLLVAL(151, 5, 2), }, /* FVco 769.305600 */ - { .frequency = 198132480, .index = PLLVAL(109, 3, 2), }, /* FVco 792.529920 */ - { .frequency = 204271200, .index = PLLVAL(185, 6, 2), }, /* FVco 817.084800 */ - { .frequency = 210268800, .index = PLLVAL(141, 4, 2), }, /* FVco 841.075200 */ - { .frequency = 216518400, .index = PLLVAL(171, 5, 2), }, /* FVco 866.073600 */ - { .frequency = 222264000, .index = PLLVAL(97, 2, 2), }, /* FVco 889.056000 */ - { .frequency = 228614400, .index = PLLVAL(127, 3, 2), }, /* FVco 914.457600 */ - { .frequency = 234259200, .index = PLLVAL(158, 4, 2), }, /* FVco 937.036800 */ - { .frequency = 240468480, .index = PLLVAL(134, 3, 2), }, /* FVco 961.873920 */ - { .frequency = 246960000, .index = PLLVAL(167, 4, 2), }, /* FVco 987.840000 */ - { .frequency = 252322560, .index = PLLVAL(141, 3, 2), }, /* FVco 1009.290240 */ - { .frequency = 258249600, .index = PLLVAL(114, 2, 2), }, /* FVco 1032.998400 */ - { .frequency = 264176640, .index = PLLVAL(148, 3, 2), }, /* FVco 1056.706560 */ - { .frequency = 270950400, .index = PLLVAL(120, 2, 2), }, /* FVco 1083.801600 */ - { .frequency = 276030720, .index = PLLVAL(155, 3, 2), }, /* FVco 1104.122880 */ - { .frequency = 282240000, .index = PLLVAL(92, 1, 2), }, /* FVco 1128.960000 */ - { .frequency = 289578240, .index = PLLVAL(163, 3, 2), }, /* FVco 1158.312960 */ - { .frequency = 294235200, .index = PLLVAL(131, 2, 2), }, /* FVco 1176.940800 */ - { .frequency = 300200727, .index = PLLVAL(187, 9, 1), }, /* FVco 600.401454 */ - { .frequency = 306358690, .index = PLLVAL(191, 9, 1), }, /* FVco 612.717380 */ - { .frequency = 312076800, .index = PLLVAL(121, 5, 1), }, /* FVco 624.153600 */ - { .frequency = 318366720, .index = PLLVAL(86, 3, 1), }, /* FVco 636.733440 */ - { .frequency = 324172800, .index = PLLVAL(126, 5, 1), }, /* FVco 648.345600 */ - { .frequency = 330220800, .index = PLLVAL(109, 4, 1), }, /* FVco 660.441600 */ - { .frequency = 336268800, .index = PLLVAL(131, 5, 1), }, /* FVco 672.537600 */ - { .frequency = 342074880, .index = PLLVAL(93, 3, 1), }, /* FVco 684.149760 */ - { .frequency = 348096000, .index = PLLVAL(177, 7, 1), }, /* FVco 696.192000 */ - { .frequency = 355622400, .index = PLLVAL(118, 4, 1), }, /* FVco 711.244800 */ - { .frequency = 360460800, .index = PLLVAL(141, 5, 1), }, /* FVco 720.921600 */ - { .frequency = 366206400, .index = PLLVAL(165, 6, 1), }, /* FVco 732.412800 */ - { .frequency = 372556800, .index = PLLVAL(124, 4, 1), }, /* FVco 745.113600 */ - { .frequency = 378201600, .index = PLLVAL(126, 4, 1), }, /* FVco 756.403200 
*/ - { .frequency = 384652800, .index = PLLVAL(151, 5, 1), }, /* FVco 769.305600 */ - { .frequency = 391608000, .index = PLLVAL(177, 6, 1), }, /* FVco 783.216000 */ - { .frequency = 396264960, .index = PLLVAL(109, 3, 1), }, /* FVco 792.529920 */ - { .frequency = 402192000, .index = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ + { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ + { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ + { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ + { .frequency = 96163200, .driver_data = PLLVAL(151, 5, 3), }, /* FVco 769.305600 */ + { .frequency = 102135600, .driver_data = PLLVAL(185, 6, 3), }, /* FVco 817.084800 */ + { .frequency = 108259200, .driver_data = PLLVAL(171, 5, 3), }, /* FVco 866.073600 */ + { .frequency = 114307200, .driver_data = PLLVAL(127, 3, 3), }, /* FVco 914.457600 */ + { .frequency = 120234240, .driver_data = PLLVAL(134, 3, 3), }, /* FVco 961.873920 */ + { .frequency = 126161280, .driver_data = PLLVAL(141, 3, 3), }, /* FVco 1009.290240 */ + { .frequency = 132088320, .driver_data = PLLVAL(148, 3, 3), }, /* FVco 1056.706560 */ + { .frequency = 138015360, .driver_data = PLLVAL(155, 3, 3), }, /* FVco 1104.122880 */ + { .frequency = 144789120, .driver_data = PLLVAL(163, 3, 3), }, /* FVco 1158.312960 */ + { .frequency = 150100363, .driver_data = PLLVAL(187, 9, 2), }, /* FVco 600.401454 */ + { .frequency = 156038400, .driver_data = PLLVAL(121, 5, 2), }, /* FVco 624.153600 */ + { .frequency = 162086400, .driver_data = PLLVAL(126, 5, 2), }, /* FVco 648.345600 */ + { .frequency = 168134400, .driver_data = PLLVAL(131, 5, 2), }, /* FVco 672.537600 */ + { .frequency = 174048000, .driver_data = PLLVAL(177, 7, 2), }, /* FVco 696.192000 */ + { .frequency = 180230400, .driver_data = PLLVAL(141, 5, 2), }, /* FVco 720.921600 */ + { .frequency = 186278400, .driver_data = PLLVAL(124, 4, 2), }, /* FVco 745.113600 */ + { .frequency = 192326400, .driver_data = PLLVAL(151, 5, 2), }, /* FVco 769.305600 */ + { .frequency = 198132480, .driver_data = PLLVAL(109, 3, 2), }, /* FVco 792.529920 */ + { .frequency = 204271200, .driver_data = PLLVAL(185, 6, 2), }, /* FVco 817.084800 */ + { .frequency = 210268800, .driver_data = PLLVAL(141, 4, 2), }, /* FVco 841.075200 */ + { .frequency = 216518400, .driver_data = PLLVAL(171, 5, 2), }, /* FVco 866.073600 */ + { .frequency = 222264000, .driver_data = PLLVAL(97, 2, 2), }, /* FVco 889.056000 */ + { .frequency = 228614400, .driver_data = PLLVAL(127, 3, 2), }, /* FVco 914.457600 */ + { .frequency = 234259200, .driver_data = PLLVAL(158, 4, 2), }, /* FVco 937.036800 */ + { .frequency = 240468480, .driver_data = PLLVAL(134, 3, 2), }, /* FVco 961.873920 */ + { .frequency = 246960000, .driver_data = PLLVAL(167, 4, 2), }, /* FVco 987.840000 */ + { .frequency = 252322560, .driver_data = PLLVAL(141, 3, 2), }, /* FVco 1009.290240 */ + { .frequency = 258249600, .driver_data = PLLVAL(114, 2, 2), }, /* FVco 1032.998400 */ + { .frequency = 264176640, .driver_data = PLLVAL(148, 3, 2), }, /* FVco 1056.706560 */ + { .frequency = 270950400, .driver_data = PLLVAL(120, 2, 2), }, /* FVco 1083.801600 */ + { .frequency = 276030720, .driver_data = PLLVAL(155, 3, 2), }, /* FVco 1104.122880 */ + { .frequency = 282240000, .driver_data = PLLVAL(92, 1, 2), }, /* FVco 1128.960000 */ + { .frequency = 289578240, .driver_data = PLLVAL(163, 3, 2), }, /* FVco 1158.312960 */ + { .frequency = 294235200, .driver_data = PLLVAL(131, 2, 2), }, /* FVco 1176.940800 */ + { 
.frequency = 300200727, .driver_data = PLLVAL(187, 9, 1), }, /* FVco 600.401454 */ + { .frequency = 306358690, .driver_data = PLLVAL(191, 9, 1), }, /* FVco 612.717380 */ + { .frequency = 312076800, .driver_data = PLLVAL(121, 5, 1), }, /* FVco 624.153600 */ + { .frequency = 318366720, .driver_data = PLLVAL(86, 3, 1), }, /* FVco 636.733440 */ + { .frequency = 324172800, .driver_data = PLLVAL(126, 5, 1), }, /* FVco 648.345600 */ + { .frequency = 330220800, .driver_data = PLLVAL(109, 4, 1), }, /* FVco 660.441600 */ + { .frequency = 336268800, .driver_data = PLLVAL(131, 5, 1), }, /* FVco 672.537600 */ + { .frequency = 342074880, .driver_data = PLLVAL(93, 3, 1), }, /* FVco 684.149760 */ + { .frequency = 348096000, .driver_data = PLLVAL(177, 7, 1), }, /* FVco 696.192000 */ + { .frequency = 355622400, .driver_data = PLLVAL(118, 4, 1), }, /* FVco 711.244800 */ + { .frequency = 360460800, .driver_data = PLLVAL(141, 5, 1), }, /* FVco 720.921600 */ + { .frequency = 366206400, .driver_data = PLLVAL(165, 6, 1), }, /* FVco 732.412800 */ + { .frequency = 372556800, .driver_data = PLLVAL(124, 4, 1), }, /* FVco 745.113600 */ + { .frequency = 378201600, .driver_data = PLLVAL(126, 4, 1), }, /* FVco 756.403200 */ + { .frequency = 384652800, .driver_data = PLLVAL(151, 5, 1), }, /* FVco 769.305600 */ + { .frequency = 391608000, .driver_data = PLLVAL(177, 6, 1), }, /* FVco 783.216000 */ + { .frequency = 396264960, .driver_data = PLLVAL(109, 3, 1), }, /* FVco 792.529920 */ + { .frequency = 402192000, .driver_data = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ }; static int s3c2440_plls169344_add(struct device *dev, diff --git a/arch/arm/mach-s3c24xx/s3c2410.c b/arch/arm/mach-s3c24xx/s3c2410.c index ff384acc65b2..34676d1d5fec 100644 --- a/arch/arm/mach-s3c24xx/s3c2410.c +++ b/arch/arm/mach-s3c24xx/s3c2410.c @@ -22,6 +22,7 @@ #include <linux/syscore_ops.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/io.h> #include <asm/mach/arch.h> @@ -196,9 +197,9 @@ int __init s3c2410a_init(void) return s3c2410_init(); } -void s3c2410_restart(char mode, const char *cmd) +void s3c2410_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { soft_restart(0); } diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c index 0f864d4c97de..0251650cbf80 100644 --- a/arch/arm/mach-s3c24xx/s3c2412.c +++ b/arch/arm/mach-s3c24xx/s3c2412.c @@ -22,6 +22,7 @@ #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -129,9 +130,9 @@ static void s3c2412_idle(void) cpu_do_idle(); } -void s3c2412_restart(char mode, const char *cmd) +void s3c2412_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); /* errata "Watch-dog/Software Reset Problem" specifies that diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c index b9c5d382dafb..9ef3ccfbe196 100644 --- a/arch/arm/mach-s3c24xx/s3c2416.c +++ b/arch/arm/mach-s3c24xx/s3c2416.c @@ -35,6 +35,7 @@ #include <linux/syscore_ops.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -79,9 +80,9 @@ static struct device s3c2416_dev = { .bus = &s3c2416_subsys, }; -void s3c2416_restart(char mode, const char *cmd) +void s3c2416_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == 
REBOOT_SOFT) soft_restart(0); __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c index 8328cd65bf3d..b6c71918b25c 100644 --- a/arch/arm/mach-s3c24xx/s3c2443.c +++ b/arch/arm/mach-s3c24xx/s3c2443.c @@ -22,6 +22,7 @@ #include <linux/device.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -59,9 +60,9 @@ static struct device s3c2443_dev = { .bus = &s3c2443_subsys, }; -void s3c2443_restart(char mode, const char *cmd) +void s3c2443_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c index d0423e2544c1..911b555029fc 100644 --- a/arch/arm/mach-s3c24xx/s3c244x.c +++ b/arch/arm/mach-s3c24xx/s3c244x.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/clk.h> @@ -198,9 +199,9 @@ struct syscore_ops s3c244x_pm_syscore_ops = { .resume = s3c244x_resume, }; -void s3c244x_restart(char mode, const char *cmd) +void s3c244x_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); samsung_wdt_reset(); diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 1aed6f4be1ce..3f62e467b129 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -21,6 +21,7 @@ #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/irq.h> @@ -381,9 +382,9 @@ static int __init s3c64xx_init_irq_eint(void) } arch_initcall(s3c64xx_init_irq_eint); -void s3c64xx_restart(char mode, const char *cmd) +void s3c64xx_restart(enum reboot_mode mode, const char *cmd) { - if (mode != 's') + if (mode != REBOOT_SOFT) samsung_wdt_reset(); /* if all else fails, or mode was for soft, jump to 0 */ diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h index 6cfc99bdfb37..e8f990b37665 100644 --- a/arch/arm/mach-s3c64xx/common.h +++ b/arch/arm/mach-s3c64xx/common.h @@ -17,13 +17,15 @@ #ifndef __ARCH_ARM_MACH_S3C64XX_COMMON_H #define __ARCH_ARM_MACH_S3C64XX_COMMON_H +#include <linux/reboot.h> + void s3c64xx_init_irq(u32 vic0, u32 vic1); void s3c64xx_init_io(struct map_desc *mach_desc, int size); void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit); void s3c64xx_setup_clocks(void); -void s3c64xx_restart(char mode, const char *cmd); +void s3c64xx_restart(enum reboot_mode mode, const char *cmd); void s3c64xx_init_late(void); #ifdef CONFIG_CPU_S3C6400 diff --git a/arch/arm/mach-s5p64x0/common.c b/arch/arm/mach-s5p64x0/common.c index 76d0053bf564..dfdfdc320ce7 100644 --- a/arch/arm/mach-s5p64x0/common.c +++ b/arch/arm/mach-s5p64x0/common.c @@ -24,6 +24,7 @@ #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/irq.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/proc-fns.h> @@ -439,9 +440,9 @@ static int __init s5p64x0_init_irq_eint(void) } arch_initcall(s5p64x0_init_irq_eint); -void s5p64x0_restart(char mode, const char *cmd) +void s5p64x0_restart(enum reboot_mode mode, const char *cmd) { - if (mode != 's') + if (mode != REBOOT_SOFT) samsung_wdt_reset(); 
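/*
 * Sketch of the cpufreq_frequency_table change used by the s3c24xx PLL
 * tables above: the generic ".index" member is now ".driver_data", and
 * it still carries the PLLVAL() register encoding.  The two frequencies
 * below are placeholders, not real operating points.
 */
#include <linux/cpufreq.h>
#include <plat/cpu-freq-core.h>         /* PLLVAL(), as in the tables above */

static struct cpufreq_frequency_table example_pll_table[] __initdata = {
        { .frequency = 100000000, .driver_data = PLLVAL(0x5c, 1, 3), },
        { .frequency = 200000000, .driver_data = PLLVAL(0x5c, 1, 2), },
        { .frequency = CPUFREQ_TABLE_END, },
};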
soft_restart(0); diff --git a/arch/arm/mach-s5p64x0/common.h b/arch/arm/mach-s5p64x0/common.h index f8a60fdc5884..f3a9b43cba4a 100644 --- a/arch/arm/mach-s5p64x0/common.h +++ b/arch/arm/mach-s5p64x0/common.h @@ -12,6 +12,8 @@ #ifndef __ARCH_ARM_MACH_S5P64X0_COMMON_H #define __ARCH_ARM_MACH_S5P64X0_COMMON_H +#include <linux/reboot.h> + void s5p6440_init_irq(void); void s5p6450_init_irq(void); void s5p64x0_init_io(struct map_desc *mach_desc, int size); @@ -22,7 +24,7 @@ void s5p6440_setup_clocks(void); void s5p6450_register_clocks(void); void s5p6450_setup_clocks(void); -void s5p64x0_restart(char mode, const char *cmd); +void s5p64x0_restart(enum reboot_mode mode, const char *cmd); #ifdef CONFIG_CPU_S5P6440 diff --git a/arch/arm/mach-s5pc100/common.c b/arch/arm/mach-s5pc100/common.c index 511031564d35..4bdfecf6d024 100644 --- a/arch/arm/mach-s5pc100/common.c +++ b/arch/arm/mach-s5pc100/common.c @@ -24,6 +24,7 @@ #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/sched.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/proc-fns.h> @@ -217,9 +218,9 @@ void __init s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no) s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no); } -void s5pc100_restart(char mode, const char *cmd) +void s5pc100_restart(enum reboot_mode mode, const char *cmd) { - if (mode != 's') + if (mode != REBOOT_SOFT) samsung_wdt_reset(); soft_restart(0); diff --git a/arch/arm/mach-s5pc100/common.h b/arch/arm/mach-s5pc100/common.h index c41f912e9e1f..08d782d65d7b 100644 --- a/arch/arm/mach-s5pc100/common.h +++ b/arch/arm/mach-s5pc100/common.h @@ -12,13 +12,15 @@ #ifndef __ARCH_ARM_MACH_S5PC100_COMMON_H #define __ARCH_ARM_MACH_S5PC100_COMMON_H +#include <linux/reboot.h> + void s5pc100_init_io(struct map_desc *mach_desc, int size); void s5pc100_init_irq(void); void s5pc100_register_clocks(void); void s5pc100_setup_clocks(void); -void s5pc100_restart(char mode, const char *cmd); +void s5pc100_restart(enum reboot_mode mode, const char *cmd); extern int s5pc100_init(void); extern void s5pc100_map_io(void); diff --git a/arch/arm/mach-s5pv210/common.c b/arch/arm/mach-s5pv210/common.c index 9dfe93e2624d..023f1a796a9c 100644 --- a/arch/arm/mach-s5pv210/common.c +++ b/arch/arm/mach-s5pv210/common.c @@ -143,7 +143,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = { } }; -void s5pv210_restart(char mode, const char *cmd) +void s5pv210_restart(enum reboot_mode mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); } diff --git a/arch/arm/mach-s5pv210/common.h b/arch/arm/mach-s5pv210/common.h index 0a1cc0aef720..fe1beb54e548 100644 --- a/arch/arm/mach-s5pv210/common.h +++ b/arch/arm/mach-s5pv210/common.h @@ -12,13 +12,15 @@ #ifndef __ARCH_ARM_MACH_S5PV210_COMMON_H #define __ARCH_ARM_MACH_S5PV210_COMMON_H +#include <linux/reboot.h> + void s5pv210_init_io(struct map_desc *mach_desc, int size); void s5pv210_init_irq(void); void s5pv210_register_clocks(void); void s5pv210_setup_clocks(void); -void s5pv210_restart(char mode, const char *cmd); +void s5pv210_restart(enum reboot_mode mode, const char *cmd); extern int s5pv210_init(void); extern void s5pv210_map_io(void); diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index ed2b85485b9d..ad40ab0f5dbd 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c @@ -377,12 +377,8 @@ static struct max8998_platform_data aquila_max8998_pdata = { .buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = 
S5PV210_GPH0(5), - .buck1_voltage1 = 1200000, - .buck1_voltage2 = 1200000, - .buck1_voltage3 = 1200000, - .buck1_voltage4 = 1200000, - .buck2_voltage1 = 1200000, - .buck2_voltage2 = 1200000, + .buck1_voltage = { 1200000, 1200000, 1200000, 1200000 }, + .buck2_voltage = { 1200000, 1200000 }, }; #endif diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index 30b24ad84f49..e5cd9fbf19e9 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -580,12 +580,8 @@ static struct max8998_platform_data goni_max8998_pdata = { .buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_voltage1 = 1200000, - .buck1_voltage2 = 1200000, - .buck1_voltage3 = 1200000, - .buck1_voltage4 = 1200000, - .buck2_voltage1 = 1200000, - .buck2_voltage2 = 1200000, + .buck1_voltage = { 1200000, 1200000, 1200000, 1200000 }, + .buck2_voltage = { 1200000, 1200000 }, }; #endif diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 9db3e98e8b85..f25b6119e028 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -19,6 +19,7 @@ #include <linux/cpufreq.h> #include <linux/ioport.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <video/sa1100fb.h> @@ -131,9 +132,9 @@ static void sa1100_power_off(void) PMCR = PMCR_SF; } -void sa11x0_restart(char mode, const char *cmd) +void sa11x0_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h index 2abc6a1f6e86..9a33695c9492 100644 --- a/arch/arm/mach-sa1100/generic.h +++ b/arch/arm/mach-sa1100/generic.h @@ -3,12 +3,13 @@ * * Author: Nicolas Pitre */ +#include <linux/reboot.h> extern void sa1100_timer_init(void); extern void __init sa1100_map_io(void); extern void __init sa1100_init_irq(void); extern void __init sa1100_init_gpio(void); -extern void sa11x0_restart(char, const char *); +extern void sa11x0_restart(enum reboot_mode, const char *); extern void sa11x0_init_late(void); #define SET_BANK(__nr,__start,__size) \ diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index a59a13a665a6..713c86cd3d64 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c @@ -14,9 +14,9 @@ #include <linux/irq.h> #include <linux/timex.h> #include <linux/clockchips.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> #include <mach/hardware.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index 153555724988..1d32c5e8eab6 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c @@ -11,6 +11,7 @@ #include <linux/serial_8250.h> #include <linux/io.h> #include <linux/cpu.h> +#include <linux/reboot.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -24,7 +25,7 @@ #define ROMCARD_SIZE 0x08000000 #define ROMCARD_START 0x10000000 -static void shark_restart(char mode, const char *cmd) +static void shark_restart(enum reboot_mode mode, const char *cmd) { short temp; /* Reset the Machine via pc[3] of the sequoia chipset */ diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 44a621505eeb..e115f6742107 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -42,6 +42,7 @@ #include 
<linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/i2c-gpio.h> +#include <linux/reboot.h> #include <mach/common.h> #include <mach/irqs.h> #include <mach/r8a7740.h> @@ -377,7 +378,7 @@ static struct resource sh_eth_resources[] = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", + .name = "r8a7740-gether", .id = -1, .dev = { .platform_data = &sh_eth_platdata, @@ -1259,7 +1260,7 @@ static void __init eva_add_early_devices(void) } #define RESCNT2 IOMEM(0xe6188020) -static void eva_restart(char mode, const char *cmd) +static void eva_restart(enum reboot_mode mode, const char *cmd) { /* Do soft power on reset */ writel((1 << 31), RESCNT2); diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index 165483c9bee2..1068120d339f 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -34,6 +34,7 @@ #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/smsc911x.h> @@ -890,7 +891,7 @@ static void __init kzm_init(void) sh73a0_pm_init(); } -static void kzm9g_restart(char mode, const char *cmd) +static void kzm9g_restart(enum reboot_mode mode, const char *cmd) { #define RESCNT2 IOMEM(0xe6188020) /* Do soft power on reset */ diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c index 7fd32d604e34..de10fd78bf2b 100644 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ b/arch/arm/mach-shmobile/clock-r8a7740.c @@ -594,7 +594,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("e6860000.sdhi", &mstp_clks[MSTP313]), CLKDEV_DEV_ID("sh_mmcif", &mstp_clks[MSTP312]), CLKDEV_DEV_ID("e6bd0000.mmcif", &mstp_clks[MSTP312]), - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP309]), + CLKDEV_DEV_ID("r8a7740-gether", &mstp_clks[MSTP309]), CLKDEV_DEV_ID("e9a00000.sh-eth", &mstp_clks[MSTP309]), CLKDEV_DEV_ID("renesas_tpu_pwm", &mstp_clks[MSTP304]), diff --git a/arch/arm/mach-shmobile/clock-r8a7778.c b/arch/arm/mach-shmobile/clock-r8a7778.c index 53798e5037d7..a0e9eb72e46d 100644 --- a/arch/arm/mach-shmobile/clock-r8a7778.c +++ b/arch/arm/mach-shmobile/clock-r8a7778.c @@ -145,7 +145,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP323]), /* SDHI0 */ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */ CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */ - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */ + CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ CLKDEV_DEV_ID("ehci-platform", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */ CLKDEV_DEV_ID("ohci-platform", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */ CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */ diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c index 9daeb8c37483..10340f5becbb 100644 --- a/arch/arm/mach-shmobile/clock-r8a7779.c +++ b/arch/arm/mach-shmobile/clock-r8a7779.c @@ -165,7 +165,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("rcar-pcie", &mstp_clks[MSTP116]), /* PCIe */ CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */ CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */ - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */ + CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), 
/* USB EHCI port2 */ CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */ CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */ diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index 7e105932c09d..5390c6bbbc02 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -142,15 +142,15 @@ static void pllc2_table_rebuild(struct clk *clk) /* Initialise PLLC2 frequency table */ for (i = 0; i < ARRAY_SIZE(pllc2_freq_table) - 2; i++) { pllc2_freq_table[i].frequency = clk->parent->rate * (i + 20) * 2; - pllc2_freq_table[i].index = i; + pllc2_freq_table[i].driver_data = i; } /* This is a special entry - switching PLL off makes it a repeater */ pllc2_freq_table[i].frequency = clk->parent->rate; - pllc2_freq_table[i].index = i; + pllc2_freq_table[i].driver_data = i; pllc2_freq_table[++i].frequency = CPUFREQ_TABLE_END; - pllc2_freq_table[i].index = i; + pllc2_freq_table[i].driver_data = i; } static unsigned long pllc2_recalc(struct clk *clk) diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c index 899a86c31ec9..1ccddd228112 100644 --- a/arch/arm/mach-shmobile/setup-emev2.c +++ b/arch/arm/mach-shmobile/setup-emev2.c @@ -287,14 +287,14 @@ static struct gpio_em_config gio3_config = { static struct resource gio3_resources[] = { [0] = { .name = "GIO_096", - .start = 0xe0050100, - .end = 0xe005012b, + .start = 0xe0050180, + .end = 0xe00501ab, .flags = IORESOURCE_MEM, }, [1] = { .name = "GIO_096", - .start = 0xe0050140, - .end = 0xe005015f, + .start = 0xe00501c0, + .end = 0xe00501df, .flags = IORESOURCE_MEM, }, [2] = { diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c index c5a75a7a508f..7f45c2edbca9 100644 --- a/arch/arm/mach-shmobile/setup-r8a73a4.c +++ b/arch/arm/mach-shmobile/setup-r8a73a4.c @@ -62,7 +62,7 @@ enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFB3 }; static const struct plat_sci_port scif[] = { SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */ SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */ - SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */ + SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */ SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */ SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */ SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */ diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 8ea11b472b91..bfce9641e32f 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -19,6 +19,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/reboot.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> @@ -89,13 +90,13 @@ static void __init socfpga_init_irq(void) socfpga_sysmgr_init(); } -static void socfpga_cyclone5_restart(char mode, const char *cmd) +static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) { u32 temp; temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); - if (mode == 'h') + if (mode == REBOOT_HARD) temp |= RSTMGR_CTRL_SWCOLDRSTREQ; else temp |= RSTMGR_CTRL_SWWARMRSTREQ; diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h index a9fd45362fee..904f2c907b46 100644 --- a/arch/arm/mach-spear/generic.h +++ b/arch/arm/mach-spear/generic.h @@ -16,6 +16,8 @@ #include <linux/dmaengine.h> #include <linux/amba/pl08x.h> #include <linux/init.h> 
+#include <linux/reboot.h> + #include <asm/mach/time.h> extern void spear13xx_timer_init(void); @@ -32,7 +34,7 @@ void __init spear6xx_clk_init(void __iomem *misc_base); void __init spear13xx_map_io(void); void __init spear13xx_l2x0_init(void); -void spear_restart(char, const char *); +void spear_restart(enum reboot_mode, const char *); void spear13xx_secondary_startup(void); void __cpuinit spear13xx_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c index 2b44500bb718..ce5e098c4888 100644 --- a/arch/arm/mach-spear/restart.c +++ b/arch/arm/mach-spear/restart.c @@ -12,14 +12,15 @@ */ #include <linux/io.h> #include <linux/amba/sp810.h> +#include <linux/reboot.h> #include <asm/system_misc.h> #include <mach/spear.h> #include "generic.h" #define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204) -void spear_restart(char mode, const char *cmd) +void spear_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* software reset, Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c index 0227c97797cd..bf3b1fd8cb23 100644 --- a/arch/arm/mach-spear/spear3xx.c +++ b/arch/arm/mach-spear/spear3xx.c @@ -56,8 +56,8 @@ struct pl08x_platform_data pl080_plat_data = { }, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, - .get_signal = pl080_get_signal, - .put_signal = pl080_put_signal, + .get_xfer_signal = pl080_get_signal, + .put_xfer_signal = pl080_put_signal, }; /* diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c index 8b0295a41226..da26fa5b68d7 100644 --- a/arch/arm/mach-spear/spear6xx.c +++ b/arch/arm/mach-spear/spear6xx.c @@ -334,8 +334,8 @@ static struct pl08x_platform_data spear6xx_pl080_plat_data = { }, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, - .get_signal = pl080_get_signal, - .put_signal = pl080_put_signal, + .get_xfer_signal = pl080_get_signal, + .put_xfer_signal = pl080_put_signal, .slave_channels = spear600_dma_info, .num_slave_channels = ARRAY_SIZE(spear600_dma_info), }; diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 84485a10fc3a..38a3c55527c8 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -18,6 +18,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/io.h> +#include <linux/reboot.h> #include <linux/clk/sunxi.h> @@ -33,7 +34,7 @@ static void __iomem *wdt_base; -static void sun4i_restart(char mode, const char *cmd) +static void sun4i_restart(enum reboot_mode mode, const char *cmd) { if (!wdt_base) return; diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 65c5ae6fa386..ef3a8da49b2d 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -28,7 +28,6 @@ config ARCH_TEGRA_2x_SOC select ARM_ERRATA_754327 if SMP select ARM_ERRATA_764369 if SMP select ARM_GIC - select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA20 @@ -46,7 +45,6 @@ config ARCH_TEGRA_3x_SOC select ARM_ERRATA_754322 select ARM_ERRATA_764369 if SMP select ARM_GIC - select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA30 @@ -63,7 +61,6 @@ config ARCH_TEGRA_114_SOC select HAVE_ARM_ARCH_TIMER select ARM_GIC select ARM_L1_CACHE_SHIFT_6 - select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA114 diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h index 1787327fae3a..9a6659fe2dc2 100644 --- 
a/arch/arm/mach-tegra/board.h +++ b/arch/arm/mach-tegra/board.h @@ -23,8 +23,9 @@ #define __MACH_TEGRA_BOARD_H #include <linux/types.h> +#include <linux/reboot.h> -void tegra_assert_system_reset(char mode, const char *cmd); +void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd); void __init tegra_init_early(void); void __init tegra_map_common_io(void); diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index b25153e2ebaa..94a119a35af8 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c @@ -22,6 +22,7 @@ #include <linux/io.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/reboot.h> #include <linux/irqchip.h> #include <linux/clk-provider.h> @@ -68,7 +69,7 @@ void __init tegra_dt_init_irq(void) } #endif -void tegra_assert_system_reset(char mode, const char *cmd) +void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd) { void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0); u32 reg; diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index 4f7ac2a11452..35670b15f281 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -300,11 +300,11 @@ static void __init u300_init_check_chip(void) /* Forward declare this function from the watchdog */ void coh901327_watchdog_reset(void); -static void u300_restart(char mode, const char *cmd) +static void u300_restart(enum reboot_mode mode, const char *cmd) { switch (mode) { - case 's': - case 'h': + case REBOOT_SOFT: + case REBOOT_HARD: #ifdef CONFIG_COH901327_WATCHDOG coh901327_watchdog_reset(); #endif diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index 390ae5feb1d0..b5db207dfd1e 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c @@ -21,9 +21,9 @@ #include <linux/delay.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> /* Generic stuff */ -#include <asm/sched_clock.h> #include <asm/mach/map.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 54bb80b012ac..3b0572f30d56 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -38,6 +38,7 @@ #include <linux/clkdev.h> #include <linux/mtd/physmap.h> #include <linux/bitops.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/hardware/arm_timer.h> @@ -733,7 +734,7 @@ static void versatile_leds_event(led_event_t ledevt) } #endif /* CONFIG_LEDS */ -void versatile_restart(char mode, const char *cmd) +void versatile_restart(enum reboot_mode mode, const char *cmd) { void __iomem *sys = __io_address(VERSATILE_SYS_BASE); u32 val; diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h index 5c1b87d1da6b..f06d5768e428 100644 --- a/arch/arm/mach-versatile/core.h +++ b/arch/arm/mach-versatile/core.h @@ -24,13 +24,14 @@ #include <linux/amba/bus.h> #include <linux/of_platform.h> +#include <linux/reboot.h> extern void __init versatile_init(void); extern void __init versatile_init_early(void); extern void __init versatile_init_irq(void); extern void __init versatile_map_io(void); extern void versatile_timer_init(void); -extern void versatile_restart(char, const char *); +extern void versatile_restart(enum reboot_mode, const char *); extern unsigned int mmc_status(struct device *dev); #ifdef CONFIG_OF extern struct of_dev_auxdata versatile_auxdata_lookup[]; diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c index f8f2f00856e0..eefaa60d6614 100644 --- 
a/arch/arm/mach-vt8500/vt8500.c +++ b/arch/arm/mach-vt8500/vt8500.c @@ -21,6 +21,7 @@ #include <linux/clocksource.h> #include <linux/io.h> #include <linux/pm.h> +#include <linux/reboot.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -46,7 +47,7 @@ static void __iomem *pmc_base; -void vt8500_restart(char mode, const char *cmd) +void vt8500_restart(enum reboot_mode mode, const char *cmd) { if (pmc_base) writel(1, pmc_base + VT8500_PMSR_REG); diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c index 9e4dd8b63c4a..b1eabaad50a5 100644 --- a/arch/arm/mach-w90x900/cpu.c +++ b/arch/arm/mach-w90x900/cpu.c @@ -230,9 +230,9 @@ void __init nuc900_init_clocks(void) #define WTE (1 << 7) #define WTRE (1 << 1) -void nuc9xx_restart(char mode, const char *cmd) +void nuc9xx_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-w90x900/nuc9xx.h b/arch/arm/mach-w90x900/nuc9xx.h index 88ef4b267089..e3ab1e1381f1 100644 --- a/arch/arm/mach-w90x900/nuc9xx.h +++ b/arch/arm/mach-w90x900/nuc9xx.h @@ -14,10 +14,13 @@ * published by the Free Software Foundation. * */ + +#include <linux/reboot.h> + struct map_desc; /* core initialisation functions */ extern void nuc900_init_irq(void); extern void nuc900_timer_init(void); -extern void nuc9xx_restart(char, const char *); +extern void nuc9xx_restart(enum reboot_mode, const char *); diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 4130e65a0e3f..5b799c29886e 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -101,7 +101,7 @@ static const char * const zynq_dt_match[] = { NULL }; -MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") +DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") .smp = smp_ops(zynq_smp_ops), .map_io = zynq_map_io, .init_machine = zynq_init_machine, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 19a19bd44447..dbddc07a3bbd 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1328,6 +1328,15 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, if (gfp & GFP_ATOMIC) return __iommu_alloc_atomic(dev, size, handle); + /* + * Following is a work-around (a.k.a. hack) to prevent pages + * with __GFP_COMP being passed to split_page() which cannot + * handle them. The real problem is that this flag probably + * should be 0 on ARM as it is not supported on this + * platform; see CONFIG_HUGETLBFS. 
+ */ + gfp &= ~(__GFP_COMP); + pages = __iommu_alloc_buffer(dev, size, gfp, attrs); if (!pages) return NULL; @@ -1386,16 +1395,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { - struct page **pages = __iommu_get_pages(cpu_addr, attrs); + struct page **pages; size = PAGE_ALIGN(size); - if (!pages) { - WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + if (__in_atomic_pool(cpu_addr, size)) { + __iommu_free_atomic(dev, cpu_addr, handle, size); return; } - if (__in_atomic_pool(cpu_addr, size)) { - __iommu_free_atomic(dev, cpu_addr, handle, size); + pages = __iommu_get_pages(cpu_addr, attrs); + if (!pages) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); return; } @@ -1650,13 +1660,27 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t dma_addr; - int ret, len = PAGE_ALIGN(size + offset); + int ret, prot, len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); if (dma_addr == DMA_ERROR_CODE) return dma_addr; - ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); + switch (dir) { + case DMA_BIDIRECTIONAL: + prot = IOMMU_READ | IOMMU_WRITE; + break; + case DMA_TO_DEVICE: + prot = IOMMU_READ; + break; + case DMA_FROM_DEVICE: + prot = IOMMU_WRITE; + break; + default: + prot = 0; + } + + ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); if (ret < 0) goto fail; @@ -1921,7 +1945,7 @@ void arm_iommu_detach_device(struct device *dev) iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); - mapping = NULL; + dev->archdata.mapping = NULL; set_dma_ops(dev, NULL); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2ffee02d1d5c..15225d829d71 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -583,9 +583,6 @@ static void __init free_highpages(void) */ void __init mem_init(void) { - unsigned long reserved_pages, free_pages; - struct memblock_region *reg; - int i; #ifdef CONFIG_HAVE_TCM /* These pointers are filled in on TCM detection */ extern u32 dtcm_end; @@ -596,57 +593,16 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ free_unused_memmap(&meminfo); - - totalram_pages += free_all_bootmem(); + free_all_bootmem(); #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); + free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); #endif free_highpages(); - reserved_pages = free_pages = 0; - - for_each_bank(i, &meminfo) { - struct membank *bank = &meminfo.bank[i]; - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = bank_pfn_start(bank); - pfn2 = bank_pfn_end(bank); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - if (PageReserved(page)) - reserved_pages++; - else if (!page_count(page)) - free_pages++; - page++; - } while (page < end); - } - - /* - * Since our memory may not be contiguous, calculate the - * real number of pages we have in this system - */ - printk(KERN_INFO "Memory:"); - num_physpages = 0; - for_each_memblock(memory, reg) { - unsigned long pages = memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - 
num_physpages += pages; - printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); - } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - - printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", - nr_free_pages() << (PAGE_SHIFT-10), - free_pages << (PAGE_SHIFT-10), - reserved_pages << (PAGE_SHIFT-10), - totalhigh_pages << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 @@ -712,7 +668,7 @@ void __init mem_init(void) BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); #endif - if (PAGE_SIZE >= 16384 && num_physpages <= 128) { + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get @@ -729,12 +685,12 @@ void free_initmem(void) extern char __tcm_start, __tcm_end; poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); - free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link"); + free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); #endif poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD @@ -745,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } } diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 10062ceadd1c..0c6356255fe3 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index d7229d28c7f8..4f56617a2392 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -950,7 +950,7 @@ void __init debug_ll_io_init(void) map.virtual &= PAGE_MASK; map.length = PAGE_SIZE; map.type = MT_DEVICE; - create_mapping(&map); + iotable_init(&map, 1); } #endif diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 1a643ee8e082..f50d223a0bd3 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -900,8 +900,7 @@ void bpf_jit_compile(struct sk_filter *fp) #endif alloc_size = 4 * ctx.idx; - ctx.target = module_alloc(max(sizeof(struct work_struct), - alloc_size)); + ctx.target = module_alloc(alloc_size); if (unlikely(ctx.target == NULL)) goto out; @@ -927,19 +926,8 @@ out: return; } -static void bpf_jit_free_worker(struct work_struct *work) -{ - module_free(NULL, work); -} - void bpf_jit_free(struct sk_filter *fp) { - struct work_struct *work; - - if (fp->bpf_func != sk_run_filter) { - work = (struct work_struct *)fp->bpf_func; - - INIT_WORK(work, bpf_jit_free_worker); - schedule_work(work); - } + if (fp->bpf_func != sk_run_filter) + module_free(NULL, fp->bpf_func); } diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index 1ff6a37e893c..a4d1f8de3b5b 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c @@ -192,12 +192,10 @@ static int __init iop3xx_adma_cap_init(void) #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */ 
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); - dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); #else dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask); - dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); #endif diff --git a/arch/arm/plat-iop/gpio.c b/arch/arm/plat-iop/gpio.c index e4de9be78feb..697de6dc4936 100644 --- a/arch/arm/plat-iop/gpio.c +++ b/arch/arm/plat-iop/gpio.c @@ -17,6 +17,7 @@ #include <linux/gpio.h> #include <linux/export.h> #include <asm/hardware/iop3xx.h> +#include <mach/gpio.h> void gpio_line_config(int line, int direction) { diff --git a/arch/arm/plat-iop/restart.c b/arch/arm/plat-iop/restart.c index 33fa699a4d28..3a4d5e5fde52 100644 --- a/arch/arm/plat-iop/restart.c +++ b/arch/arm/plat-iop/restart.c @@ -11,7 +11,7 @@ #include <asm/system_misc.h> #include <mach/hardware.h> -void iop3xx_restart(char mode, const char *cmd) +void iop3xx_restart(enum reboot_mode mode, const char *cmd) { *IOP3XX_PCSR = 0x30; diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 837a2d52e9db..29606bd75f3f 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c @@ -22,9 +22,9 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/export.h> +#include <linux/sched_clock.h> #include <mach/hardware.h> #include <asm/irq.h> -#include <asm/sched_clock.h> #include <asm/uaccess.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index 5b0b86bb34bb..d9bc98eb2a6b 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -18,9 +18,9 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/clocksource.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> #include <plat/counter-32k.h> diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index c019b7aaf776..c66d163d7a2a 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -666,14 +666,9 @@ void __init orion_xor0_init(unsigned long mapbase_low, orion_xor0_shared_resources[3].start = irq_1; orion_xor0_shared_resources[3].end = irq_1; - /* - * two engines can't do memset simultaneously, this limitation - * satisfied by removing memset support from one of the engines. - */ dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask); dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask); - dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask); dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask); dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask); @@ -732,14 +727,9 @@ void __init orion_xor1_init(unsigned long mapbase_low, orion_xor1_shared_resources[3].start = irq_1; orion_xor1_shared_resources[3].end = irq_1; - /* - * two engines can't do memset simultaneously, this limitation - * satisfied by removing memset support from one of the engines. 
- */ dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask); dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask); - dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask); dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask); dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask); diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 5d5ac0f05422..9d2b2ac74938 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -16,7 +16,7 @@ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> /* * MBus bridge block registers. diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h index d7e17150028a..7231c8e4975e 100644 --- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h +++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h @@ -285,7 +285,7 @@ static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table, s3c_freq_dbg("%s: { %d = %u kHz }\n", __func__, index, freq); - table[index].index = index; + table[index].driver_data = index; table[index].frequency = freq; } diff --git a/arch/arm/plat-samsung/samsung-time.c b/arch/arm/plat-samsung/samsung-time.c index f899cbc9b288..2957075ca836 100644 --- a/arch/arm/plat-samsung/samsung-time.c +++ b/arch/arm/plat-samsung/samsung-time.c @@ -15,12 +15,12 @@ #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/platform_device.h> +#include <linux/sched_clock.h> #include <asm/smp_twd.h> #include <asm/mach/time.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> -#include <asm/sched_clock.h> #include <mach/map.h> #include <plat/devs.h> diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c index b33b74c87232..51b109e3b6c3 100644 --- a/arch/arm/plat-versatile/sched-clock.c +++ b/arch/arm/plat-versatile/sched-clock.c @@ -20,8 +20,8 @@ */ #include <linux/kernel.h> #include <linux/io.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <plat/sched_clock.h> static void __iomem *ctr; diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 13609e01f4b7..f71c37edca26 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -314,4 +314,5 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op); EXPORT_SYMBOL_GPL(privcmd_call); diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index 199cb2da7663..d1cf7b7c2200 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -88,6 +88,7 @@ HYPERCALL2(hvm_op); HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); +HYPERCALL1(tmem_op); ENTRY(privcmd_call) stmdb sp!, {r4} diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f497ca77925a..67e8d7ce3fe7 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -197,14 +197,6 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = max; } -/* - * Poison init memory with an undefined instruction (0x0). 
- */ -static inline void poison_init_mem(void *s, size_t count) -{ - memset(s, 0, count); -} - #ifndef CONFIG_SPARSEMEM_VMEMMAP static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -280,59 +272,17 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { - unsigned long reserved_pages, free_pages; - struct memblock_region *reg; - arm64_swiotlb_init(); max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; #ifndef CONFIG_SPARSEMEM_VMEMMAP - /* this will put all unused low memory onto the freelists */ free_unused_memmap(); #endif + /* this will put all unused low memory onto the freelists */ + free_all_bootmem(); - totalram_pages += free_all_bootmem(); - - reserved_pages = free_pages = 0; - - for_each_memblock(memory, reg) { - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = __phys_to_pfn(reg->base); - pfn2 = pfn1 + __phys_to_pfn(reg->size); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - if (PageReserved(page)) - reserved_pages++; - else if (!page_count(page)) - free_pages++; - page++; - } while (page < end); - } - - /* - * Since our memory may not be contiguous, calculate the real number - * of pages we have in this system. - */ - pr_info("Memory:"); - num_physpages = 0; - for_each_memblock(memory, reg) { - unsigned long pages = memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - num_physpages += pages; - printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); - } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - - pr_notice("Memory: %luk/%luk available, %luk reserved\n", - nr_free_pages() << (PAGE_SHIFT-10), - free_pages << (PAGE_SHIFT-10), - reserved_pages << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 @@ -374,7 +324,7 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR); BUG_ON(TASK_SIZE_64 > MODULES_VADDR); - if (PAGE_SIZE >= 16384 && num_physpages <= 128) { + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get anywhere without @@ -386,7 +336,6 @@ void __init mem_init(void) void free_initmem(void) { - poison_init_mem(__init_begin, __init_end - __init_begin); free_initmem_default(0); } @@ -396,10 +345,8 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) { - poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - free_reserved_area(start, end, 0, "initrd"); - } + if (!keep_initrd) + free_reserved_area((void *)start, (void *)end, 0, "initrd"); } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 7c7be7855638..8ed6cb1a900f 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 2816c479cd49..531342ec4bcf 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -79,6 +79,7 @@ HYPERCALL2(hvm_op); HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); 
+HYPERCALL1(tmem_op); ENTRY(privcmd_call) mov x16, x0 diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 37401f535126..11c4259c62fb 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index b4247f478065..209ae5ad3495 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c @@ -555,7 +555,7 @@ void __init setup_arch (char **cmdline_p) { struct clk *cpu_clk; - init_mm.start_code = (unsigned long)_text; + init_mm.start_code = (unsigned long)_stext; init_mm.end_code = (unsigned long)_etext; init_mm.end_data = (unsigned long)_edata; init_mm.brk = (unsigned long)_end; diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index 9cd2bd91d64a..a4589176bed5 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -23,7 +23,7 @@ SECTIONS { . = CONFIG_ENTRY_ADDRESS; .init : AT(ADDR(.init) - LOAD_OFFSET) { - _stext = .; + _text = .; __init_begin = .; _sinittext = .; *(.text.reset) @@ -46,7 +46,7 @@ SECTIONS .text : AT(ADDR(.text) - LOAD_OFFSET) { _evba = .; - _text = .; + _stext = .; *(.ex.text) *(.irq.text) KPROBES_TEXT diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index e66e8406f992..def5391d927a 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c @@ -100,60 +100,26 @@ void __init paging_init(void) void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - int nid, i; + pg_data_t *pgdat; - reservedpages = 0; high_memory = NULL; + for_each_online_pgdat(pgdat) + high_memory = max_t(void *, high_memory, + __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); - /* this will put all low memory onto the freelists */ - for_each_online_node(nid) { - pg_data_t *pgdat = NODE_DATA(nid); - unsigned long node_pages = 0; - void *node_high_memory; - - num_physpages += pgdat->node_present_pages; - - if (pgdat->node_spanned_pages != 0) - node_pages = free_all_bootmem_node(pgdat); - - totalram_pages += node_pages; - - for (i = 0; i < node_pages; i++) - if (PageReserved(pgdat->node_mem_map + i)) - reservedpages++; - - node_high_memory = (void *)((pgdat->node_start_pfn - + pgdat->node_spanned_pages) - << PAGE_SHIFT); - if (node_high_memory > high_memory) - high_memory = node_high_memory; - } - - max_mapnr = MAP_NR(high_memory); - - codesize = (unsigned long)_etext - (unsigned long)_text; - datasize = (unsigned long)_edata - (unsigned long)_data; - initsize = (unsigned long)__init_end - (unsigned long)__init_begin; - - printk ("Memory: %luk/%luk available (%dk kernel code, " - "%dk reserved, %dk data, %dk init)\n", - nr_free_pages() << (PAGE_SHIFT - 10), - totalram_pages << (PAGE_SHIFT - 10), - codesize >> 10, - reservedpages << (PAGE_SHIFT - 10), - datasize >> 10, - initsize >> 10); + set_max_mapnr(MAP_NR(high_memory)); + free_all_bootmem(); + mem_init_print_info(NULL); } void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index b573827d0416..3b6abc54b015 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -41,6 +41,7 @@ config BLACKFIN select 
ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA + select HAVE_DEBUG_STACKOVERFLOW config GENERIC_CSUM def_bool y @@ -282,7 +283,7 @@ config BF_REV_0_0 config BF_REV_0_1 bool "0.1" - depends on (BF51x || BF52x || (BF54x && !BF54xM)) + depends on (BF51x || BF52x || (BF54x && !BF54xM) || BF60x) config BF_REV_0_2 bool "0.2" diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index 79594694ee90..f3337ee03621 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -2,13 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit. - config DEBUG_VERBOSE bool "Verbose fault messages" default y diff --git a/arch/blackfin/include/asm/bfin6xx_spi.h b/arch/blackfin/include/asm/bfin_spi3.h index 89370b653dcd..0957e65a54be 100644 --- a/arch/blackfin/include/asm/bfin6xx_spi.h +++ b/arch/blackfin/include/asm/bfin_spi3.h @@ -240,7 +240,7 @@ struct bfin_spi_regs { #define MAX_CTRL_CS 8 /* cs in spi controller */ /* device.platform_data for SSP controller devices */ -struct bfin6xx_spi_master { +struct bfin_spi3_master { u16 num_chipselect; u16 pin_req[7]; }; @@ -248,7 +248,7 @@ struct bfin6xx_spi_master { /* spi_board_info.controller_data for SPI slave devices, * copied to spi_device.platform_data ... mostly for dma tuning */ -struct bfin6xx_spi_chip { +struct bfin_spi3_chip { u32 control; u16 cs_chg_udelay; /* Some devices require 16-bit delays */ u32 tx_dummy_val; /* tx value for rx only transfer */ diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index b882ce22c347..fa53faeeb0e9 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -9,6 +9,7 @@ #include <linux/ptrace.h> /* for linux pt_regs struct */ #include <linux/kgdb.h> #include <linux/uaccess.h> +#include <asm/irq_regs.h> void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c index d58f50e5aa4b..1e7be62fccb6 100644 --- a/arch/blackfin/mach-bf527/boards/ad7160eval.c +++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c @@ -283,14 +283,6 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - /* TODO: add platform data here */ -}; -#endif - static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) @@ -800,10 +792,6 @@ static struct platform_device *stamp_devices[] __initdata = { #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s, #endif - -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm, -#endif }; static int __init ad7160eval_init(void) diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 29f16e5c37b9..d0a0c5e527cd 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -493,8 +493,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { }; #endif -#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ - defined(CONFIG_SND_BF5XX_TDM) || 
defined(CONFIG_SND_BF5XX_TDM_MODULE) +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) static const u16 bfin_snd_pin[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, @@ -549,13 +548,6 @@ static struct platform_device bfin_i2s_pcm = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm_pcm = { - .name = "bfin-tdm-pcm-audio", - .id = -1, -}; -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97_pcm = { .name = "bfin-ac97-pcm-audio", @@ -575,22 +567,10 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), - .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], - .dev = { - .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], - }, -}; -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) static const char * const ad1836_link[] = { - "bfin-tdm.0", + "bfin-i2s.0", "spi0.4", }; static struct platform_device bfin_ad1836_machine = { @@ -1269,10 +1249,6 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_i2s_pcm, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm_pcm, -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97_pcm, #endif @@ -1281,10 +1257,6 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_i2s, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm, -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \ defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) &bfin_ad1836_machine, diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index 07811c209b9d..90fb0d14b147 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -450,14 +450,6 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - /* TODO: add platform data here */ -}; -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97 = { .name = "bfin-ac97", @@ -516,10 +508,6 @@ static struct platform_device *ezkit_devices[] __initdata = { &bfin_i2s, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm, -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97, #endif diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 6fca8698bf3b..4a8c2e3fd7e5 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -542,8 +542,7 @@ static struct platform_device bfin_dpmc = { }; #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ - defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) \ - || defined(CONFIG_SND_BF5XX_AC97) || \ + defined(CONFIG_SND_BF5XX_AC97) || \ defined(CONFIG_SND_BF5XX_AC97_MODULE) #include <asm/bfin_sport.h> @@ -603,13 +602,6 @@ static struct 
platform_device bfin_i2s_pcm = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm_pcm = { - .name = "bfin-tdm-pcm-audio", - .id = -1, -}; -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97_pcm = { .name = "bfin-ac97-pcm-audio", @@ -620,7 +612,7 @@ static struct platform_device bfin_ac97_pcm = { #if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) static const char * const ad1836_link[] = { - "bfin-tdm.0", + "bfin-i2s.0", "spi0.4", }; static struct platform_device bfin_ad1836_machine = { @@ -675,20 +667,6 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_SOC_TDM) || \ - defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - .num_resources = - ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), - .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], - .dev = { - .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], - }, -}; -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AC97) || \ defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) static struct platform_device bfin_ac97 = { @@ -761,10 +739,6 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_i2s_pcm, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm_pcm, -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97_pcm, #endif @@ -792,11 +766,6 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_i2s, #endif -#if defined(CONFIG_SND_BF5XX_SOC_TDM) || \ - defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) - &bfin_tdm, -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AC97) || \ defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) &bfin_ac97, diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 6a3a14bcd3a1..44fd1d4682ac 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -2570,7 +2570,6 @@ static struct platform_device bfin_dpmc = { }; #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ - defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \ defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) #define SPORT_REQ(x) \ @@ -2628,13 +2627,6 @@ static struct platform_device bfin_i2s_pcm = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm_pcm = { - .name = "bfin-tdm-pcm-audio", - .id = -1, -}; -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97_pcm = { .name = "bfin-ac97-pcm-audio", @@ -2645,7 +2637,7 @@ static struct platform_device bfin_ac97_pcm = { #if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) static const char * const ad1836_link[] = { - "bfin-tdm.0", + "bfin-i2s.0", "spi0.4", }; static struct platform_device bfin_ad1836_machine = { @@ -2699,18 +2691,6 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), - .resource = 
bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], - .dev = { - .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], - }, -}; -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) static struct platform_device bfin_ac97 = { .name = "bfin-ac97", @@ -2935,10 +2915,6 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_i2s_pcm, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm_pcm, -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97_pcm, #endif @@ -2961,10 +2937,6 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_i2s, #endif -#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) - &bfin_tdm, -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) &bfin_ac97, #endif diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index c4d07f040947..372eb54944ef 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -1393,7 +1393,6 @@ static struct platform_device bfin_dpmc = { }; #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ - defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \ defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) #define SPORT_REQ(x) \ @@ -1461,13 +1460,6 @@ static struct platform_device bfin_i2s_pcm = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm_pcm = { - .name = "bfin-tdm-pcm-audio", - .id = -1, -}; -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97_pcm = { .name = "bfin-ac97-pcm-audio", @@ -1501,18 +1493,6 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), - .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], - .dev = { - .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], - }, -}; -#endif - #if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) static struct platform_device bfin_ac97 = { .name = "bfin-ac97", @@ -1646,9 +1626,7 @@ static struct platform_device *ezkit_devices[] __initdata = { #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s_pcm, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm_pcm, -#endif + #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97_pcm, #endif @@ -1661,10 +1639,6 @@ static struct platform_device *ezkit_devices[] __initdata = { &bfin_i2s, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm, -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97, #endif diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 551f866172cf..92938e79b9e3 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -523,14 +523,6 @@ static struct platform_device bfin_i2s = { }; #endif -#if defined(CONFIG_SND_BF5XX_TDM) || 
defined(CONFIG_SND_BF5XX_TDM_MODULE) -static struct platform_device bfin_tdm = { - .name = "bfin-tdm", - .id = CONFIG_SND_BF5XX_SPORT_NUM, - /* TODO: add platform data here */ -}; -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97 = { .name = "bfin-ac97", @@ -542,7 +534,7 @@ static struct platform_device bfin_ac97 = { #if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) static const char * const ad1836_link[] = { - "bfin-tdm.0", + "bfin-i2s.0", "spi0.4", }; static struct platform_device bfin_ad1836_machine = { @@ -611,10 +603,6 @@ static struct platform_device *ezkit_devices[] __initdata = { &bfin_i2s, #endif -#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) - &bfin_tdm, -#endif - #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97, #endif diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c index ab1c617b9cfc..c77a23bc9de3 100644 --- a/arch/blackfin/mach-bf561/smp.c +++ b/arch/blackfin/mach-bf561/smp.c @@ -69,7 +69,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu) SSYNC(); /* We are done with local CPU inits, unblock the boot CPU. */ - set_cpu_online(cpu, true); spin_lock(&boot_lock); spin_unlock(&boot_lock); } @@ -91,7 +90,9 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle SSYNC(); } - timeout = jiffies + 1 * HZ; + timeout = jiffies + HZ; + /* release the lock and let coreb run */ + spin_unlock(&boot_lock); while (time_before(jiffies, timeout)) { if (cpu_online(cpu)) break; @@ -100,8 +101,6 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle } if (cpu_online(cpu)) { - /* release the lock and let coreb run */ - spin_unlock(&boot_lock); return 0; } else panic("CPU%u: processor failed to boot\n", cpu); diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index 97d701639585..0bc47231540b 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -17,7 +17,7 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> -#include <asm/bfin6xx_spi.h> +#include <asm/bfin_spi3.h> #include <asm/dma.h> #include <asm/gpio.h> #include <asm/nand.h> @@ -108,7 +108,6 @@ static struct platform_device bfin_rotary_device = { static unsigned short pins[] = P_RMII0; static struct stmmac_mdio_bus_data phy_private_data = { - .bus_id = 0, .phy_mask = 1, }; @@ -745,13 +744,13 @@ static struct flash_platform_data bfin_spi_flash_data = { .type = "w25q32", }; -static struct bfin6xx_spi_chip spi_flash_chip_info = { +static struct bfin_spi3_chip spi_flash_chip_info = { .enable_dma = true, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) -static struct bfin6xx_spi_chip spidev_chip_info = { +static struct bfin_spi3_chip spidev_chip_info = { .enable_dma = true, }; #endif @@ -821,7 +820,7 @@ static struct platform_device bfin_i2s = { #if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) static const char * const ad1836_link[] = { - "bfin-tdm.0", + "bfin-i2s.0", "spi0.76", }; static struct platform_device bfin_ad1836_machine = { @@ -1296,7 +1295,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif }; -#if defined(CONFIG_SPI_BFIN6XX) || defined(CONFIG_SPI_BFIN6XX_MODULE) +#if IS_ENABLED(CONFIG_SPI_BFIN_V3) /* SPI (0) */ 
static struct resource bfin_spi0_resource[] = { { @@ -1337,13 +1336,13 @@ static struct resource bfin_spi1_resource[] = { }; /* SPI controller data */ -static struct bfin6xx_spi_master bf60x_spi_master_info0 = { +static struct bfin_spi3_master bf60x_spi_master_info0 = { .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bf60x_spi_master0 = { - .name = "bfin-spi", + .name = "bfin-spi3", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, @@ -1352,13 +1351,13 @@ static struct platform_device bf60x_spi_master0 = { }, }; -static struct bfin6xx_spi_master bf60x_spi_master_info1 = { +static struct bfin_spi3_master bf60x_spi_master_info1 = { .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0}, }; static struct platform_device bf60x_spi_master1 = { - .name = "bfin-spi", + .name = "bfin-spi3", .id = 1, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi1_resource), .resource = bfin_spi1_resource, @@ -1534,7 +1533,7 @@ static struct platform_device *ezkit_devices[] __initdata = { &bfin_sdh_device, #endif -#if defined(CONFIG_SPI_BFIN6XX) || defined(CONFIG_SPI_BFIN6XX_MODULE) +#if IS_ENABLED(CONFIG_SPI_BFIN_V3) &bf60x_spi_master0, &bf60x_spi_master1, #endif diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 1bc2ce6f3c94..961d8392e5e3 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -49,6 +49,7 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS]; struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; enum ipi_message_type { + BFIN_IPI_NONE, BFIN_IPI_TIMER, BFIN_IPI_RESCHEDULE, BFIN_IPI_CALL_FUNC, @@ -72,8 +73,8 @@ static DEFINE_SPINLOCK(stop_lock); /* Simple FIFO buffer, overflow leads to panic */ struct ipi_data { - unsigned long count; - unsigned long bits; + atomic_t count; + atomic_t bits; }; static DEFINE_PER_CPU(struct ipi_data, bfin_ipi); @@ -146,7 +147,6 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) platform_clear_ipi(cpu, IRQ_SUPPLE_1); bfin_ipi_data = &__get_cpu_var(bfin_ipi); - smp_mb(); while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { msg = 0; do { @@ -170,9 +170,8 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) ipi_cpu_stop(cpu); break; } + atomic_dec(&bfin_ipi_data->count); } while (msg < BITS_PER_LONG); - - smp_mb(); } return IRQ_HANDLED; } @@ -195,12 +194,10 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) unsigned long flags; local_irq_save(flags); - smp_mb(); for_each_cpu(cpu, cpumask) { bfin_ipi_data = &per_cpu(bfin_ipi, cpu); - smp_mb(); - set_bit(msg, &bfin_ipi_data->bits); - bfin_ipi_data->count++; + atomic_set_mask((1 << msg), &bfin_ipi_data->bits); + atomic_inc(&bfin_ipi_data->count); platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); } @@ -319,7 +316,6 @@ void __cpuinit secondary_start_kernel(void) setup_secondary(cpu); platform_secondary_init(cpu); - /* setup local core timer */ bfin_local_timer_setup(); @@ -335,6 +331,8 @@ void __cpuinit secondary_start_kernel(void) */ calibrate_delay(); + /* We are done with local CPU inits, unblock the boot CPU. 
*/ + set_cpu_online(cpu, true); cpu_startup_entry(CPUHP_ONLINE); } diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c index 82d01a71207f..166842de3dc7 100644 --- a/arch/blackfin/mm/init.c +++ b/arch/blackfin/mm/init.c @@ -90,50 +90,24 @@ asmlinkage void __init init_pda(void) void __init mem_init(void) { - unsigned int codek = 0, datak = 0, initk = 0; - unsigned int reservedpages = 0, freepages = 0; - unsigned long tmp; - unsigned long start_mem = memory_start; - unsigned long end_mem = memory_end; + char buf[64]; - end_mem &= PAGE_MASK; - high_memory = (void *)end_mem; - - start_mem = PAGE_ALIGN(start_mem); - max_mapnr = num_physpages = MAP_NR(high_memory); - printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); + high_memory = (void *)(memory_end & PAGE_MASK); + max_mapnr = MAP_NR(high_memory); + printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", max_mapnr); /* This will put all low memory onto the freelists. */ - totalram_pages = free_all_bootmem(); - - reservedpages = 0; - for (tmp = ARCH_PFN_OFFSET; tmp < max_mapnr; tmp++) - if (PageReserved(pfn_to_page(tmp))) - reservedpages++; - freepages = max_mapnr - ARCH_PFN_OFFSET - reservedpages; - - /* do not count in kernel image between _rambase and _ramstart */ - reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT; -#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) - reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT; -#endif - - codek = (_etext - _stext) >> 10; - initk = (__init_end - __init_begin) >> 10; - datak = ((_ramstart - _rambase) >> 10) - codek - initk; + free_all_bootmem(); - printk(KERN_INFO - "Memory available: %luk/%luk RAM, " - "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n", - (unsigned long) freepages << (PAGE_SHIFT-10), (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 10, - initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); + snprintf(buf, sizeof(buf) - 1, "%uK DMA", DMA_UNCACHED_REGION >> 10); + mem_init_print_info(buf); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { #ifndef CONFIG_MPU - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); #endif } #endif @@ -141,7 +115,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) void __init_refok free_initmem(void) { #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU - free_initmem_default(0); + free_initmem_default(-1); if (memory_start == (unsigned long)(&__init_end)) memory_start = (unsigned long)(&__init_begin); #endif diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index 1d81c4c129ec..279d80725128 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -54,16 +54,15 @@ SECTIONS } . 
= ALIGN(PAGE_SIZE); + __init_begin = .; .init : { - _stext = .; _sinittext = .; HEAD_TEXT INIT_TEXT _einittext = .; } - __init_begin = _stext; INIT_DATA_SECTION(16) PERCPU_SECTION(128) @@ -74,6 +73,7 @@ SECTIONS .text : { _text = .; + _stext = .; TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c index a9fcd89b251b..63f5560d6eb2 100644 --- a/arch/c6x/mm/init.c +++ b/arch/c6x/mm/init.c @@ -18,6 +18,7 @@ #include <linux/initrd.h> #include <asm/sections.h> +#include <asm/uaccess.h> /* * ZERO_PAGE is a special page that is used for zero-initialized @@ -57,31 +58,22 @@ void __init paging_init(void) void __init mem_init(void) { - int codek, datak; - unsigned long tmp; - unsigned long len = memory_end - memory_start; - high_memory = (void *)(memory_end & PAGE_MASK); /* this will put all memory onto the freelists */ - totalram_pages = free_all_bootmem(); - - codek = (_etext - _stext) >> 10; - datak = (_end - _sdata) >> 10; + free_all_bootmem(); - tmp = nr_free_pages() << PAGE_SHIFT; - printk(KERN_INFO "Memory: %luk/%luk RAM (%dk kernel code, %dk data)\n", - tmp >> 10, len >> 10, codek, datak); + mem_init_print_info(NULL); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif void __init free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 9f3c54360e78..3201ddb8da6a 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -134,11 +134,13 @@ config SVINTO_SIM config ETRAXFS bool "ETRAX-FS-V32" + select CPU_FREQ_TABLE if CPU_FREQ help Support CRIS V32. config CRIS_MACH_ARTPEC3 bool "ARTPEC-3" + select CPU_FREQ_TABLE if CPU_FREQ help Support Axis ARTPEC-3. 
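Most of the mm hunks in this range apply the same cross-architecture cleanup: the hand-rolled page counting and per-arch "Memory: ..." printk is replaced by mem_init_print_info(), the return value of free_all_bootmem() is no longer added to totalram_pages by arch code, and free_reserved_area()/free_initmem_default() take void * bounds plus a poison value, where -1 asks for the range to be freed without being overwritten (arm64 keeps 0 to clear it). A rough sketch of the shape these files converge on, assembled from the hunks above rather than copied from any one of them — memory_end-style globals and keep_initrd stand in for each architecture's own variables:

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

void __init mem_init(void)
{
	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);

	/* put low memory on the freelists; totalram_pages accounting is
	 * handled in the mm core now, so no "totalram_pages +=" here */
	free_all_bootmem();

	/* one generic "Memory: ...K/...K available (...)" banner; a
	 * non-NULL string (e.g. "%uK DMA" on blackfin) gets appended */
	mem_init_print_info(NULL);
}

void free_initmem(void)
{
	/* poison == -1: free __init_begin..__init_end without poisoning;
	 * arches that still want the range cleared pass 0 instead */
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		/* free_reserved_area() now takes void * start/end */
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif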
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig index 5f2cdb3e428c..daf5f19b61a1 100644 --- a/arch/cris/arch-v10/drivers/Kconfig +++ b/arch/cris/arch-v10/drivers/Kconfig @@ -2,9 +2,7 @@ if ETRAX_ARCH_V10 config ETRAX_ETHERNET bool "Ethernet support" - depends on ETRAX_ARCH_V10 - select ETHERNET - select NET_CORE + depends on ETRAX_ARCH_V10 && NETDEVICES select MII help This option enables the ETRAX 100LX built-in 10/100Mbit Ethernet diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig index acff3df8c43f..1d866d3ee2f8 100644 --- a/arch/cris/arch-v32/drivers/Kconfig +++ b/arch/cris/arch-v32/drivers/Kconfig @@ -2,9 +2,7 @@ if ETRAX_ARCH_V32 config ETRAX_ETHERNET bool "Ethernet support" - depends on ETRAX_ARCH_V32 - select ETHERNET - select NET_CORE + depends on ETRAX_ARCH_V32 && NETDEVICES select MII help This option enables the ETRAX FS built-in 10/100Mbit Ethernet diff --git a/arch/cris/include/asm/page.h b/arch/cris/include/asm/page.h index be45ee366be9..dfc53f9b88ec 100644 --- a/arch/cris/include/asm/page.h +++ b/arch/cris/include/asm/page.h @@ -51,7 +51,6 @@ typedef struct page *pgtable_t; */ #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) -#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT) /* convert a page (based on mem_map and forward) to a physical address diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h index ba409c9947bc..eb723e51554e 100644 --- a/arch/cris/include/uapi/asm/socket.h +++ b/arch/cris/include/uapi/asm/socket.h @@ -76,6 +76,8 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index 9ac80946dada..c81af5bd9167 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c @@ -19,9 +19,6 @@ unsigned long empty_zero_page; void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - unsigned long tmp; - BUG_ON(!mem_map); /* max/min_low_pfn was set by setup.c @@ -29,35 +26,9 @@ mem_init(void) * * high_memory was also set in setup.c */ - - max_mapnr = num_physpages = max_low_pfn - min_low_pfn; - - /* this will put all memory onto the freelists */ - totalram_pages = free_all_bootmem(); - - reservedpages = 0; - for (tmp = 0; tmp < max_mapnr; tmp++) { - /* - * Only count reserved RAM pages - */ - if (PageReserved(mem_map + tmp)) - reservedpages++; - } - - codesize = (unsigned long) &_etext - (unsigned long) &_stext; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO - "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, " - "%dk init)\n" , - nr_free_pages() << (PAGE_SHIFT-10), - max_mapnr << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10 - ); + max_mapnr = max_low_pfn - min_low_pfn; + free_all_bootmem(); + mem_init_print_info(NULL); } /* free the pages occupied by initialization code */ @@ -65,5 +36,5 @@ mem_init(void) void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 2ce731f9aa4d..4b6628ea381e 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -14,6 +14,7 @@ config FRV select ARCH_WANT_IPC_PARSE_VERSION select OLD_SIGSUSPEND3 select OLD_SIGACTION + select 
HAVE_DEBUG_STACKOVERFLOW config ZONE_DMA bool diff --git a/arch/frv/Kconfig.debug b/arch/frv/Kconfig.debug index 211f01bc4caa..98c99a3ed2be 100644 --- a/arch/frv/Kconfig.debug +++ b/arch/frv/Kconfig.debug @@ -2,10 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - config GDBSTUB bool "Remote GDB kernel debugging" depends on DEBUG_KERNEL diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index 31dbb5d8e13d..f0cb1c341163 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -74,5 +74,7 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c index 0b579927439d..ac767d94a880 100644 --- a/arch/frv/kernel/pm.c +++ b/arch/frv/kernel/pm.c @@ -150,7 +150,7 @@ static int user_atoi(char __user *ubuf, size_t len) /* * Send us to sleep. */ -static int sysctl_pm_do_suspend(ctl_table *ctl, int write, +static int sysctl_pm_do_suspend(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { int mode; @@ -197,7 +197,7 @@ static int try_set_cmode(int new_cmode) } -static int cmode_procctl(ctl_table *ctl, int write, +static int cmode_procctl(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { int new_cmode; @@ -269,7 +269,7 @@ static int try_set_cm(int new_cm) return 0; } -static int p0_procctl(ctl_table *ctl, int write, +static int p0_procctl(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { int new_p0; @@ -282,7 +282,7 @@ static int p0_procctl(ctl_table *ctl, int write, return try_set_p0(new_p0)?:*lenp; } -static int cm_procctl(ctl_table *ctl, int write, +static int cm_procctl(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { int new_cm; diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c index a5136474c6fd..ae3a6706419b 100644 --- a/arch/frv/kernel/setup.c +++ b/arch/frv/kernel/setup.c @@ -735,7 +735,7 @@ static void __init parse_cmdline_early(char *cmdline) /* "mem=XXX[kKmM]" sets SDRAM size to <mem>, overriding the value we worked * out from the SDRAM controller mask register */ - if (!memcmp(cmdline, "mem=", 4)) { + if (!strncmp(cmdline, "mem=", 4)) { unsigned long long mem_size; mem_size = memparse(cmdline + 4, &cmdline); @@ -876,6 +876,7 @@ late_initcall(setup_arch_serial); static void __init setup_linux_memory(void) { unsigned long bootmap_size, low_top_pfn, kstart, kend, high_mem; + unsigned long physpages; kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET; kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET; @@ -893,19 +894,19 @@ static void __init setup_linux_memory(void) ); /* pass the memory that the kernel can immediately use over to the bootmem allocator */ - max_mapnr = num_physpages = (memory_end - memory_start) >> PAGE_SHIFT; + max_mapnr = physpages = (memory_end - memory_start) >> PAGE_SHIFT; low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT; high_mem = 0; - if (num_physpages > low_top_pfn) { + if (physpages > low_top_pfn) { #ifdef CONFIG_HIGHMEM - high_mem = num_physpages - low_top_pfn; + high_mem = physpages - low_top_pfn; #else - max_mapnr = num_physpages = low_top_pfn; + max_mapnr = physpages = low_top_pfn; #endif } else { - low_top_pfn = num_physpages; + low_top_pfn = physpages; } min_low_pfn = memory_start >> PAGE_SHIFT; @@ -979,7 +980,7 @@ static void 
__init setup_uclinux_memory(void) free_bootmem(memory_start, memory_end - memory_start); high_memory = (void *) (memory_end & PAGE_MASK); - max_mapnr = num_physpages = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT; + max_mapnr = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT; min_low_pfn = memory_start >> PAGE_SHIFT; max_low_pfn = memory_end >> PAGE_SHIFT; diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c index 6c155d69da29..f4dfae2c75ad 100644 --- a/arch/frv/kernel/sysctl.c +++ b/arch/frv/kernel/sysctl.c @@ -46,7 +46,7 @@ static void frv_change_dcache_mode(unsigned long newmode) /* * handle requests to dynamically switch the write caching mode delivered by /proc */ -static int procctl_frv_cachemode(ctl_table *table, int write, +static int procctl_frv_cachemode(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -121,7 +121,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, * permit the mm_struct the nominated process is using have its MMU context ID pinned */ #ifdef CONFIG_MMU -static int procctl_frv_pin_cxnr(ctl_table *table, int write, +static int procctl_frv_pin_cxnr(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index 4bff48c19d29..a6d105d61b26 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c @@ -523,7 +523,7 @@ void die_if_kernel(const char *str, ...) return; va_start(va, str); - vsprintf(buffer, str, va); + vsnprintf(buffer, sizeof(buffer), str, va); va_end(va); console_verbose(); diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index dee354fa6b64..88a159743528 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c @@ -78,7 +78,7 @@ void __init paging_init(void) memset((void *) empty_zero_page, 0, PAGE_SIZE); #ifdef CONFIG_HIGHMEM - if (num_physpages - num_mappedpages) { + if (get_num_physpages() - num_mappedpages) { pgd_t *pge; pud_t *pue; pmd_t *pme; @@ -96,7 +96,7 @@ void __init paging_init(void) */ zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; #ifdef CONFIG_HIGHMEM - zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages; + zones_size[ZONE_HIGHMEM] = get_num_physpages() - num_mappedpages; #endif free_area_init(zones_size); @@ -114,45 +114,24 @@ void __init paging_init(void) */ void __init mem_init(void) { - unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT; - unsigned long tmp; -#ifdef CONFIG_MMU - unsigned long loop, pfn; - int datapages = 0; -#endif - int codek = 0, datak = 0; + unsigned long code_size = _etext - _stext; /* this will put all low memory onto the freelists */ - totalram_pages = free_all_bootmem(); - -#ifdef CONFIG_MMU - for (loop = 0 ; loop < npages ; loop++) - if (PageReserved(&mem_map[loop])) - datapages++; - -#ifdef CONFIG_HIGHMEM - for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) - free_highmem_page(&mem_map[pfn]); -#endif - - codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; - datak = datapages << (PAGE_SHIFT - 10); - -#else - codek = (_etext - _stext) >> 10; - datak = 0; //(__bss_stop - _sdata) >> 10; + free_all_bootmem(); +#if defined(CONFIG_MMU) && defined(CONFIG_HIGHMEM) + { + unsigned long pfn; + + for (pfn = get_num_physpages() - 1; + pfn >= num_mappedpages; pfn--) + free_highmem_page(&mem_map[pfn]); + } #endif - tmp = nr_free_pages() << PAGE_SHIFT; - printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n", - tmp >> 10, - npages << 
(PAGE_SHIFT - 10), - (rom_length > 0) ? ((rom_length >> 10) - codek) : 0, - rom_length >> 10, - codek, - datak - ); - + mem_init_print_info(NULL); + if (rom_length > 0 && rom_length >= code_size) + printk("Memory available: %luKiB/%luKiB ROM\n", + (rom_length - code_size) >> 10, rom_length >> 10); } /* end mem_init() */ /*****************************************************************************/ @@ -162,7 +141,7 @@ void __init mem_init(void) void free_initmem(void) { #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) - free_initmem_default(0); + free_initmem_default(-1); #endif } /* end free_initmem() */ @@ -173,6 +152,6 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } /* end free_initrd_mem() */ #endif diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c index 51ab6cbd030f..4a1e3dd43948 100644 --- a/arch/h8300/boot/compressed/misc.c +++ b/arch/h8300/boot/compressed/misc.c @@ -79,7 +79,6 @@ static void error(char *m); int puts(const char *); -extern int _text; /* Defined in vmlinux.lds.S */ extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h index 5d1c6d0870e6..9490758c5e2b 100644 --- a/arch/h8300/include/uapi/asm/socket.h +++ b/arch/h8300/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 03d356d96e5d..3253fed42ac1 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -132,10 +132,12 @@ SECTIONS { . = ALIGN(0x4) ; __sbss = . ; + ___bss_start = . ; *(.bss*) . = ALIGN(0x4) ; *(COMMON) . = ALIGN(0x4) ; + ___bss_stop = . ; __ebss = . ; __end = . ; __ramstart = .; diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index ff349d70a29b..6c1251e491af 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -121,47 +121,27 @@ void __init paging_init(void) void __init mem_init(void) { - int codek = 0, datak = 0, initk = 0; - /* DAVIDM look at setup memory map generically with reserved area */ - unsigned long tmp; - extern unsigned long _ramend, _ramstart; - unsigned long len = &_ramend - &_ramstart; - unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ - unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */ + unsigned long codesize = _etext - _stext; -#ifdef DEBUG - printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem); -#endif + pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end); - end_mem &= PAGE_MASK; - high_memory = (void *) end_mem; - - start_mem = PAGE_ALIGN(start_mem); - max_mapnr = num_physpages = MAP_NR(high_memory); + high_memory = (void *) (memory_end & PAGE_MASK); + max_mapnr = MAP_NR(high_memory); /* this will put all low memory onto the freelists */ - totalram_pages = free_all_bootmem(); - - codek = (_etext - _stext) >> 10; - datak = (__bss_stop - _sdata) >> 10; - initk = (__init_begin - __init_end) >> 10; - - tmp = nr_free_pages() << PAGE_SHIFT; - printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n", - tmp >> 10, - len >> 10, - (rom_length > 0) ? 
((rom_length >> 10) - codek) : 0, - rom_length >> 10, - codek, - datak - ); + free_all_bootmem(); + + mem_init_print_info(NULL); + if (rom_length > 0 && rom_length > codesize) + pr_info("Memory available: %luK/%luK ROM\n", + (rom_length - codesize) >> 10, rom_length >> 10); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif @@ -169,7 +149,7 @@ void free_initmem(void) { #ifdef CONFIG_RAMKERNEL - free_initmem_default(0); + free_initmem_default(-1); #endif } diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 2561d259a296..88977e42af0a 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -70,10 +70,8 @@ unsigned long long kmap_generation; void __init mem_init(void) { /* No idea where this is actually declared. Seems to evade LXR. */ - totalram_pages += free_all_bootmem(); - num_physpages = bootmem_lastpg-ARCH_PFN_OFFSET; - - printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages); + free_all_bootmem(); + mem_init_print_info(NULL); /* * To-Do: someone somewhere should wipe out the bootmem map diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index bcda5b2d121a..d43daf192b21 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2042,7 +2042,8 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) #endif static int __init -acpi_sba_ioc_add(struct acpi_device *device) +acpi_sba_ioc_add(struct acpi_device *device, + const struct acpi_device_id *not_used) { struct ioc *ioc; acpi_status status; @@ -2090,14 +2091,18 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = { {"HWP0004", 0}, {"", 0}, }; -static struct acpi_driver acpi_sba_ioc_driver = { - .name = "IOC IOMMU Driver", - .ids = hp_ioc_iommu_device_ids, - .ops = { - .add = acpi_sba_ioc_add, - }, +static struct acpi_scan_handler acpi_sba_ioc_handler = { + .ids = hp_ioc_iommu_device_ids, + .attach = acpi_sba_ioc_add, }; +static int __init acpi_sba_ioc_init_acpi(void) +{ + return acpi_scan_add_handler(&acpi_sba_ioc_handler); +} +/* This has to run before acpi_scan_init(). */ +arch_initcall(acpi_sba_ioc_init_acpi); + extern struct dma_map_ops swiotlb_dma_ops; static int __init @@ -2122,7 +2127,10 @@ sba_init(void) } #endif - acpi_bus_register_driver(&acpi_sba_ioc_driver); + /* + * ioc_list should be populated by the acpi_sba_ioc_handler's .attach() + * routine, but that only happens if acpi_scan_init() has already run. 
+ */ if (!ioc_list) { #ifdef CONFIG_IA64_GENERIC /* diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c index 271f412bda1a..87bf9ad8cf0f 100644 --- a/arch/ia64/hp/sim/boot/fw-emu.c +++ b/arch/ia64/hp/sim/boot/fw-emu.c @@ -290,16 +290,16 @@ sys_fw_init (const char *args, int arglen) efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE; efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION; efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr); - efi_runtime->get_time = __pa(&fw_efi_get_time); - efi_runtime->set_time = __pa(&efi_unimplemented); - efi_runtime->get_wakeup_time = __pa(&efi_unimplemented); - efi_runtime->set_wakeup_time = __pa(&efi_unimplemented); - efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented); - efi_runtime->get_variable = __pa(&efi_unimplemented); - efi_runtime->get_next_variable = __pa(&efi_unimplemented); - efi_runtime->set_variable = __pa(&efi_unimplemented); - efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented); - efi_runtime->reset_system = __pa(&efi_reset_system); + efi_runtime->get_time = (void *)__pa(&fw_efi_get_time); + efi_runtime->set_time = (void *)__pa(&efi_unimplemented); + efi_runtime->get_wakeup_time = (void *)__pa(&efi_unimplemented); + efi_runtime->set_wakeup_time = (void *)__pa(&efi_unimplemented); + efi_runtime->set_virtual_address_map = (void *)__pa(&efi_unimplemented); + efi_runtime->get_variable = (void *)__pa(&efi_unimplemented); + efi_runtime->get_next_variable = (void *)__pa(&efi_unimplemented); + efi_runtime->set_variable = (void *)__pa(&efi_unimplemented); + efi_runtime->get_next_high_mono_count = (void *)__pa(&efi_unimplemented); + efi_runtime->reset_system = (void *)__pa(&efi_reset_system); efi_tables->guid = SAL_SYSTEM_TABLE_GUID; efi_tables->table = __pa(sal_systab); diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index c13064e422df..d1b04c4c95e3 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -268,7 +268,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev) static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct simeth_local *local; struct in_device *in_dev; struct in_ifaddr **ifap = NULL; diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 6b4329f18b29..556d0701a155 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -83,4 +83,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index f034563aeae5..51bce594eb83 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -1116,11 +1116,6 @@ efi_memmap_init(u64 *s, u64 *e) if (!is_memory_available(md)) continue; -#ifdef CONFIG_CRASH_DUMP - /* saved_max_pfn should ignore max_addr= command line arg */ - if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT)) - saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT); -#endif /* * Round ends inward to granule boundaries * Give trimmings to uncached allocator diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 5b2dc0d10c8f..bdfd8789b376 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1560,6 +1560,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) return 0; } +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +} + int 
kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 142c3b785944..da5237d636d6 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -294,14 +294,6 @@ find_memory (void) alloc_per_cpu_data(); } -static int count_pages(u64 start, u64 end, void *arg) -{ - unsigned long *count = arg; - - *count += (end - start) >> PAGE_SHIFT; - return 0; -} - /* * Set up the page tables. */ @@ -312,9 +304,6 @@ paging_init (void) unsigned long max_dma; unsigned long max_zone_pfns[MAX_NR_ZONES]; - num_physpages = 0; - efi_memmap_walk(count_pages, &num_physpages); - memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 7253d83650bf..2de08f4d9930 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -37,7 +37,6 @@ struct early_node_data { struct ia64_node_data *node_data; unsigned long pernode_addr; unsigned long pernode_size; - unsigned long num_physpages; #ifdef CONFIG_ZONE_DMA unsigned long num_dma_physpages; #endif @@ -732,7 +731,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n { unsigned long end = start + len; - mem_data[node].num_physpages += len >> PAGE_SHIFT; #ifdef CONFIG_ZONE_DMA if (start <= __pa(MAX_DMA_ADDRESS)) mem_data[node].num_dma_physpages += @@ -778,7 +776,6 @@ void __init paging_init(void) #endif for_each_online_node(node) { - num_physpages += mem_data[node].num_physpages; pfn_offset = mem_data[node].min_pfn; #ifdef CONFIG_VIRTUAL_MEM_MAP diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index d1fe4b402601..b6f7f43424ec 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -154,9 +154,8 @@ ia64_init_addr_space (void) void free_initmem (void) { - free_reserved_area((unsigned long)ia64_imva(__init_begin), - (unsigned long)ia64_imva(__init_end), - 0, "unused kernel"); + free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end), + -1, "unused kernel"); } void __init @@ -546,19 +545,6 @@ int __init register_active_ranges(u64 start, u64 len, int nid) return 0; } -static int __init -count_reserved_pages(u64 start, u64 end, void *arg) -{ - unsigned long num_reserved = 0; - unsigned long *count = arg; - - for (; start < end; start += PAGE_SIZE) - if (PageReserved(virt_to_page(start))) - ++num_reserved; - *count += num_reserved; - return 0; -} - int find_max_min_low_pfn (u64 start, u64 end, void *arg) { @@ -597,8 +583,6 @@ __setup("nolwsys", nolwsys_setup); void __init mem_init (void) { - long reserved_pages, codesize, datasize, initsize; - pg_data_t *pgdat; int i; BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); @@ -616,27 +600,12 @@ mem_init (void) #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); - max_mapnr = max_low_pfn; #endif + set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); - - for_each_online_pgdat(pgdat) - if (pgdat->bdata->node_bootmem_map) - totalram_pages += free_all_bootmem_node(pgdat); - - reserved_pages = 0; - efi_memmap_walk(count_reserved_pages, &reserved_pages); - - codesize = (unsigned long) _etext - (unsigned long) _stext; - datasize = (unsigned long) _edata - (unsigned long) _etext; - initsize = (unsigned long) __init_end - (unsigned long) __init_begin; - - printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, " - "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 
10), - num_physpages << (PAGE_SHIFT - 10), codesize >> 10, - reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); - + free_all_bootmem(); + mem_init_print_info(NULL); /* * For fsyscall entrpoints with no light-weight handler, use the ordinary diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 2b00adedc45e..0b5ce82d203d 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -268,17 +268,10 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller, &resources); - if (bus == NULL) - goto error_return; /* error, or bus already scanned */ - - bus->sysdata = controller; - - return; - -error_return: - kfree(res); - kfree(controller); - return; + if (bus == NULL) { + kfree(res); + kfree(controller); + } } /* diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index bcd17b206571..29a7ef4e448b 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -16,6 +16,7 @@ config M32R select GENERIC_ATOMIC64 select ARCH_USES_GETTIMEOFFSET select MODULES_USE_ELF_RELA + select HAVE_DEBUG_STACKOVERFLOW config SBUS bool diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug index bb1afc1a31cc..6c612b7691b0 100644 --- a/arch/m32r/Kconfig.debug +++ b/arch/m32r/Kconfig.debug @@ -2,13 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit. - config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL && BROKEN diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 2a3b59e0e171..24be7c8da86a 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c index 2c468e8b5853..27196303ce36 100644 --- a/arch/m32r/mm/discontig.c +++ b/arch/m32r/mm/discontig.c @@ -129,11 +129,10 @@ unsigned long __init setup_memory(void) #define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn) #define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) -unsigned long __init zone_sizes_init(void) +void __init zone_sizes_init(void) { unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES]; unsigned long low, start_pfn; - unsigned long holes = 0; int nid, i; mem_prof_t *mp; @@ -147,7 +146,6 @@ unsigned long __init zone_sizes_init(void) low = MAX_LOW_PFN(nid); zones_size[ZONE_DMA] = low - start_pfn; zholes_size[ZONE_DMA] = mp->holes; - holes += zholes_size[ZONE_DMA]; node_set_state(nid, N_NORMAL_MEMORY); free_area_init_node(nid, zones_size, start_pfn, zholes_size); @@ -161,6 +159,4 @@ unsigned long __init zone_sizes_init(void) NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0; NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0; NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0; - - return holes; } diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index ab4cbce91a9b..0d4146f644dc 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c @@ -40,7 +40,6 @@ unsigned long mmu_context_cache_dat; #else unsigned long mmu_context_cache_dat[NR_CPUS]; #endif -static unsigned long hole_pages; /* * function prototype @@ -57,7 +56,7 @@ void free_initrd_mem(unsigned long, unsigned long); #define MAX_LOW_PFN(nid) 
(NODE_DATA(nid)->bdata->node_low_pfn) #ifndef CONFIG_DISCONTIGMEM -unsigned long __init zone_sizes_init(void) +void __init zone_sizes_init(void) { unsigned long zones_size[MAX_NR_ZONES] = {0, }; unsigned long max_dma; @@ -83,11 +82,9 @@ unsigned long __init zone_sizes_init(void) #endif /* CONFIG_MMU */ free_area_init_node(0, zones_size, start_pfn, 0); - - return 0; } #else /* CONFIG_DISCONTIGMEM */ -extern unsigned long zone_sizes_init(void); +extern void zone_sizes_init(void); #endif /* CONFIG_DISCONTIGMEM */ /*======================================================================* @@ -105,24 +102,7 @@ void __init paging_init(void) for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++) pgd_val(pg_dir[i]) = 0; #endif /* CONFIG_MMU */ - hole_pages = zone_sizes_init(); -} - -int __init reservedpages_count(void) -{ - int reservedpages, nid, i; - - reservedpages = 0; - for_each_online_node(nid) { - unsigned long flags; - pgdat_resize_lock(NODE_DATA(nid), &flags); - for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++) - if (PageReserved(nid_page_nr(nid, i))) - reservedpages++; - pgdat_resize_unlock(NODE_DATA(nid), &flags); - } - - return reservedpages; + zone_sizes_init(); } /*======================================================================* @@ -131,48 +111,20 @@ int __init reservedpages_count(void) *======================================================================*/ void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - int nid; #ifndef CONFIG_MMU extern unsigned long memory_end; -#endif - - num_physpages = 0; - for_each_online_node(nid) - num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1; - - num_physpages -= hole_pages; -#ifndef CONFIG_DISCONTIGMEM - max_mapnr = num_physpages; -#endif /* CONFIG_DISCONTIGMEM */ - -#ifdef CONFIG_MMU - high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0))); -#else high_memory = (void *)(memory_end & PAGE_MASK); +#else + high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0))); #endif /* CONFIG_MMU */ /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); - /* this will put all low memory onto the freelists */ - for_each_online_node(nid) - totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); - - reservedpages = reservedpages_count() - hole_pages; - codesize = (unsigned long) &_etext - (unsigned long)&_text; - datasize = (unsigned long) &_edata - (unsigned long)&_etext; - initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin; - - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " - "%dk reserved, %dk data, %dk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10); + set_max_mapnr(get_num_physpages()); + free_all_bootmem(); + mem_init_print_info(NULL); } /*======================================================================* @@ -181,7 +133,7 @@ void __init mem_init(void) *======================================================================*/ void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD @@ -191,6 +143,6 @@ void free_initmem(void) *======================================================================*/ void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 1af2ca3411f6..6b4baa6e4d31 100644 --- a/arch/m68k/mm/init.c 
+++ b/arch/m68k/mm/init.c @@ -110,7 +110,7 @@ void __init paging_init(void) void free_initmem(void) { #ifndef CONFIG_MMU_SUN3 - free_initmem_default(0); + free_initmem_default(-1); #endif /* CONFIG_MMU_SUN3 */ } @@ -146,38 +146,11 @@ void __init print_memmap(void) MLK_ROUNDUP(__bss_start, __bss_stop)); } -void __init mem_init(void) +static inline void init_pointer_tables(void) { - pg_data_t *pgdat; - int codepages = 0; - int datapages = 0; - int initpages = 0; +#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) int i; - /* this will put all memory onto the freelists */ - totalram_pages = num_physpages = 0; - for_each_online_pgdat(pgdat) { - num_physpages += pgdat->node_present_pages; - - totalram_pages += free_all_bootmem_node(pgdat); - for (i = 0; i < pgdat->node_spanned_pages; i++) { - struct page *page = pgdat->node_mem_map + i; - char *addr = page_to_virt(page); - - if (!PageReserved(page)) - continue; - if (addr >= _text && - addr < _etext) - codepages++; - else if (addr >= __init_begin && - addr < __init_end) - initpages++; - else - datapages++; - } - } - -#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) /* insert pointer tables allocated so far into the tablelist */ init_pointer_table((unsigned long)kernel_pg_dir); for (i = 0; i < PTRS_PER_PGD; i++) { @@ -189,19 +162,20 @@ void __init mem_init(void) if (zero_pgtable) init_pointer_table((unsigned long)zero_pgtable); #endif +} - pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - totalram_pages << (PAGE_SHIFT-10), - codepages << (PAGE_SHIFT-10), - datapages << (PAGE_SHIFT-10), - initpages << (PAGE_SHIFT-10)); +void __init mem_init(void) +{ + /* this will put all memory onto the freelists */ + free_all_bootmem(); + init_pointer_tables(); + mem_init_print_info(NULL); print_memmap(); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/m68k/platform/coldfire/pci.c b/arch/m68k/platform/coldfire/pci.c index 8572246db84d..b33f97a13e6d 100644 --- a/arch/m68k/platform/coldfire/pci.c +++ b/arch/m68k/platform/coldfire/pci.c @@ -320,7 +320,6 @@ static int __init mcf_pci_init(void) pci_bus_size_bridges(rootbus); pci_bus_assign_resources(rootbus); pci_enable_bridges(rootbus); - pci_bus_add_devices(rootbus); return 0; } diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index dcd94406030e..cfd831c29824 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -30,6 +30,7 @@ config METAG select OF select OF_EARLY_FLATTREE select SPARSE_IRQ + select HAVE_DEBUG_STACKOVERFLOW config STACKTRACE_SUPPORT def_bool y diff --git a/arch/metag/Kconfig.debug b/arch/metag/Kconfig.debug index e45bbf6a7a5d..cb5c92860540 100644 --- a/arch/metag/Kconfig.debug +++ b/arch/metag/Kconfig.debug @@ -6,13 +6,6 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit. 
- config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on DEBUG_KERNEL diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc index ec079cfb7c6a..2a3c860c7525 100644 --- a/arch/metag/Kconfig.soc +++ b/arch/metag/Kconfig.soc @@ -14,6 +14,18 @@ config META21_FPGA help This is a Meta 2.1 FPGA bitstream, just a bare CPU. +config SOC_TZ1090 + bool "Toumaz Xenif TZ1090 SoC (Comet)" + select METAG_LNKGET_AROUND_CACHE + select METAG_META21 + select METAG_SMP_WRITE_REORDERING + select PINCTRL + select PINCTRL_TZ1090 + select PINCTRL_TZ1090_PDC + help + This is a Toumaz Technology Xenif TZ1090 (A.K.A. Comet) SoC containing + a 2-threaded HTP. + endchoice menu "SoC configuration" diff --git a/arch/metag/Makefile b/arch/metag/Makefile index b566116b171b..9739857bdedc 100644 --- a/arch/metag/Makefile +++ b/arch/metag/Makefile @@ -20,7 +20,7 @@ checkflags-$(CONFIG_METAG_META12) += -DMETAC_1_2 checkflags-$(CONFIG_METAG_META21) += -DMETAC_2_1 CHECKFLAGS += -D__metag__ $(checkflags-y) -KBUILD_DEFCONFIG := meta2_defconfig +KBUILD_DEFCONFIG := tz1090_defconfig sflags-$(CONFIG_METAG_META12) += -mmetac=1.2 ifeq ($(CONFIG_METAG_META12),y) diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore index a021da201156..2d6c0c160884 100644 --- a/arch/metag/boot/.gitignore +++ b/arch/metag/boot/.gitignore @@ -1,4 +1,4 @@ vmlinux* uImage* ramdisk.* -*.dtb +*.dtb* diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile index dbd95217733a..72c121879426 100644 --- a/arch/metag/boot/dts/Makefile +++ b/arch/metag/boot/dts/Makefile @@ -1,7 +1,9 @@ dtb-y += skeleton.dtb +dtb-y += tz1090_generic.dtb # Built-in dtb builtindtb-y := skeleton +builtindtb-$(CONFIG_SOC_TZ1090) := tz1090_generic ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"") builtindtb-y := $(patsubst "%",%,$(CONFIG_METAG_BUILTIN_DTB_NAME)) diff --git a/arch/metag/boot/dts/include/dt-bindings b/arch/metag/boot/dts/include/dt-bindings new file mode 120000 index 000000000000..08c00e4972fa --- /dev/null +++ b/arch/metag/boot/dts/include/dt-bindings @@ -0,0 +1 @@ +../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/metag/boot/dts/skeleton.dts b/arch/metag/boot/dts/skeleton.dts
index 7244d1f0d555..7a49aeb365d0 100644
--- a/arch/metag/boot/dts/skeleton.dts
+++ b/arch/metag/boot/dts/skeleton.dts
@@ -7,4 +7,4 @@
  */
 /dts-v1/;
-/include/ "skeleton.dtsi"
+#include "skeleton.dtsi"
diff --git a/arch/metag/boot/dts/tz1090.dtsi b/arch/metag/boot/dts/tz1090.dtsi
new file mode 100644
index 000000000000..853744652b93
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090.dtsi
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "skeleton.dtsi"
+
+/ {
+	compatible = "toumaz,tz1090", "img,meta";
+
+	interrupt-parent = <&intc>;
+
+	intc: interrupt-controller {
+		compatible = "img,meta-intc";
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		num-banks = <2>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		pinctrl: pinctrl@02005800 {
+			#gpio-range-cells = <3>;
+			compatible = "img,tz1090-pinctrl";
+			reg = <0x02005800 0xe4>;
+		};
+
+		pdc_pinctrl: pinctrl@02006500 {
+			#gpio-range-cells = <3>;
+			compatible = "img,tz1090-pdc-pinctrl";
+			reg = <0x02006500 0x100>;
+		};
+	};
+};
diff --git a/arch/metag/boot/dts/tz1090_generic.dts b/arch/metag/boot/dts/tz1090_generic.dts
new file mode 100644
index 000000000000..f96090955964
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_generic.dts
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ +/dts-v1/; + +#include "tz1090.dtsi" diff --git a/arch/metag/configs/tz1090_defconfig b/arch/metag/configs/tz1090_defconfig new file mode 100644 index 000000000000..9f9316a6df27 --- /dev/null +++ b/arch/metag/configs/tz1090_defconfig @@ -0,0 +1,42 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_ELF_CORE is not set +CONFIG_SLAB=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_FLATMEM_MANUAL=y +CONFIG_SOC_TZ1090=y +CONFIG_METAG_HALT_ON_PANIC=y +# CONFIG_METAG_FPU is not set +CONFIG_METAG_DA=y +CONFIG_HZ_100=y +CONFIG_DEVTMPFS=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=1 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_DA_TTY=y +CONFIG_DA_CONSOLE=y +# CONFIG_DEVKMEM is not set +# CONFIG_HW_RANDOM is not set +CONFIG_GPIOLIB=y +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_DEBUG_INFO=y diff --git a/arch/metag/include/asm/bug.h b/arch/metag/include/asm/bug.h index d04b48cefecc..9f8967f10f8c 100644 --- a/arch/metag/include/asm/bug.h +++ b/arch/metag/include/asm/bug.h @@ -6,7 +6,7 @@ struct pt_regs; extern const char *trap_name(int trapno); -extern void die(const char *str, struct pt_regs *regs, long err, - unsigned long addr) __attribute__ ((noreturn)); +extern void __noreturn die(const char *str, struct pt_regs *regs, long err, + unsigned long addr); #endif diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h index 999bf761a732..08dd1cc65799 100644 --- a/arch/metag/include/asm/checksum.h +++ b/arch/metag/include/asm/checksum.h @@ -64,7 +64,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __wsum sum) { unsigned long len_proto = (proto + len) << 8; - asm ("ADD %0, %0, %1\n" + asm ("ADDS %0, %0, %1\n" + "ADDCS %0, %0, #1\n" "ADDS %0, %0, %2\n" "ADDCS %0, %0, #1\n" "ADDS %0, %0, %3\n" diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h index 3e2915a280c7..ded4ab2e1fd0 100644 --- a/arch/metag/include/asm/clock.h +++ b/arch/metag/include/asm/clock.h @@ -19,6 +19,8 @@ * core frequency will be determined like this: * Meta 1: based on loops_per_jiffy. * Meta 2: (EXPAND_TIMER_DIV + 1) MHz. + * If a "core" clock is provided by the device tree, it + * will override this function. */ struct meta_clock_desc { unsigned long (*get_core_freq)(void); @@ -27,6 +29,12 @@ struct meta_clock_desc { extern struct meta_clock_desc _meta_clock; /* + * Perform platform clock initialisation, reading clocks from device tree etc. + * Only accessible during boot. + */ +void init_metag_clocks(void); + +/* * Set up the default clock, ensuring all callbacks are valid - only accessible * during boot. 
*/ diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h index be0c8f3c5a5d..ad6bd0edbc3b 100644 --- a/arch/metag/include/asm/irq.h +++ b/arch/metag/include/asm/irq.h @@ -17,6 +17,7 @@ struct pt_regs; int tbisig_map(unsigned int hw); extern void do_IRQ(int irq, struct pt_regs *regs); +extern void init_IRQ(void); #ifdef CONFIG_METAG_SUSPEND_MEM int traps_save_context(void); diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 9b029a7911c3..f16477d1f571 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -199,4 +199,6 @@ extern void (*soc_halt)(void); extern void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); +extern const struct seq_operations cpuinfo_op; + #endif diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c index 954548b1bea8..0a2385fa2a1d 100644 --- a/arch/metag/kernel/cachepart.c +++ b/arch/metag/kernel/cachepart.c @@ -100,22 +100,23 @@ void check_for_cache_aliasing(int thread_id) thread_cache_size = get_thread_cache_size(cache_type, thread_id); if (thread_cache_size < 0) - pr_emerg("Can't read %s cache size", \ + pr_emerg("Can't read %s cache size\n", cache_type ? "DCACHE" : "ICACHE"); else if (thread_cache_size == 0) /* Cache is off. No need to check for aliasing */ continue; if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) { - pr_emerg("Cache aliasing detected in %s on Thread %d", + pr_emerg("Potential cache aliasing detected in %s on Thread %d\n", cache_type ? "DCACHE" : "ICACHE", thread_id); - pr_warn("Total %s size: %u bytes", - cache_type ? "DCACHE" : "ICACHE ", + pr_warn("Total %s size: %u bytes\n", + cache_type ? "DCACHE" : "ICACHE", cache_type ? get_dcache_size() : get_icache_size()); - pr_warn("Thread %s size: %d bytes", + pr_warn("Thread %s size: %d bytes\n", cache_type ? "CACHE" : "ICACHE", thread_cache_size); - pr_warn("Page Size: %lu bytes", PAGE_SIZE); + pr_warn("Page Size: %lu bytes\n", PAGE_SIZE); + panic("Potential cache aliasing detected"); } } } diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c index defc84056f18..6339c9c6d0ab 100644 --- a/arch/metag/kernel/clock.c +++ b/arch/metag/kernel/clock.c @@ -8,8 +8,10 @@ * published by the Free Software Foundation. */ +#include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/of.h> #include <asm/param.h> #include <asm/clock.h> @@ -34,8 +36,63 @@ static unsigned long get_core_freq_default(void) #endif } +static struct clk *clk_core; + +/* Clk based get_core_freq callback. */ +static unsigned long get_core_freq_clk(void) +{ + return clk_get_rate(clk_core); +} + +/** + * init_metag_core_clock() - Set up core clock from devicetree. + * + * Checks to see if a "core" clock is provided in the device tree, and overrides + * the get_core_freq callback to use it. + */ +static void __init init_metag_core_clock(void) +{ + /* + * See if a core clock is provided by the devicetree (and + * registered by the init callback above). + */ + struct device_node *node; + node = of_find_compatible_node(NULL, NULL, "img,meta"); + if (!node) { + pr_warn("%s: no compatible img,meta DT node found\n", + __func__); + return; + } + + clk_core = of_clk_get_by_name(node, "core"); + if (IS_ERR(clk_core)) { + pr_warn("%s: no core clock found in DT\n", + __func__); + return; + } + + /* + * Override the core frequency callback to use + * this clk. 
+ */ + _meta_clock.get_core_freq = get_core_freq_clk; +} + +/** + * init_metag_clocks() - Set up clocks from devicetree. + * + * Set up important clocks from device tree. In particular any needed for clock + * sources. + */ +void __init init_metag_clocks(void) +{ + init_metag_core_clock(); + + pr_info("Core clock frequency: %lu Hz\n", get_coreclock()); +} + /** - * setup_meta_clocks() - Set up the Meta clock. + * setup_meta_clocks() - Early set up of the Meta clock. * @desc: Clock descriptor usually provided by machine description * * Ensures all callbacks are valid. diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c index 87707efeb0a3..2a2c9d55187e 100644 --- a/arch/metag/kernel/irq.c +++ b/arch/metag/kernel/irq.c @@ -25,7 +25,7 @@ static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; #endif -struct irq_domain *root_domain; +static struct irq_domain *root_domain; static unsigned int startup_meta_irq(struct irq_data *data) { @@ -279,11 +279,12 @@ static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) { struct irq_desc *desc = irq_to_desc(irq); struct irq_chip *chip = irq_data_get_irq_chip(data); + unsigned long flags; - raw_spin_lock_irq(&desc->lock); + raw_spin_lock_irqsave(&desc->lock, flags); if (chip->irq_set_affinity) chip->irq_set_affinity(data, cpumask_of(cpu), false); - raw_spin_unlock_irq(&desc->lock); + raw_spin_unlock_irqrestore(&desc->lock, flags); } /* diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c index 50fcbec98cd2..beb377621322 100644 --- a/arch/metag/kernel/kick.c +++ b/arch/metag/kernel/kick.c @@ -26,6 +26,8 @@ * pass it as an argument. */ #include <linux/export.h> +#include <linux/hardirq.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/types.h> @@ -66,6 +68,7 @@ EXPORT_SYMBOL(kick_unregister_func); TBIRES kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) { + struct pt_regs *old_regs; struct kick_irq_handler *kh; struct list_head *lh; int handled = 0; @@ -79,6 +82,9 @@ kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) trace_hardirqs_off(); + old_regs = set_irq_regs((struct pt_regs *)State.Sig.pCtx); + irq_enter(); + /* * There is no need to disable interrupts here because we * can't nest KICK interrupts in a KICK interrupt handler. 
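Note on the arch/metag/kernel/kick.c hunks above and the one that follows: they bracket the KICK trigger handler with set_irq_regs(), irq_enter() and irq_exit() so the dispatch runs in proper hard-IRQ context. A minimal sketch of that bracketing pattern, with a placeholder handler body rather than the Meta-specific TBIRES dispatch:

#include <linux/hardirq.h>	/* irq_enter()/irq_exit() */
#include <linux/irq.h>		/* irq_regs helpers, as added to kick.c */

/* Placeholder low-level handler; only the context bookkeeping is the point. */
static void example_trigger_handler(struct pt_regs *ctx)
{
	struct pt_regs *old_regs = set_irq_regs(ctx);

	irq_enter();		/* account for hard-IRQ context */

	/* ... walk the registered callbacks, as kick_handler() does ... */

	irq_exit();		/* may run pending softirqs on the way out */
	set_irq_regs(old_regs);
}

The real kick_handler() keeps its handler-list walk and tail_end() return value; only the enter/exit bookkeeping is new.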
@@ -97,5 +103,8 @@ kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) WARN_ON(!handled); + irq_exit(); + set_irq_regs(old_regs); + return tail_end(ret); } diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index ec872ef14eb1..215c94ad63ac 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -1,5 +1,7 @@ #include <linux/export.h> +#include <linux/types.h> +#include <asm/checksum.h> #include <asm/div64.h> #include <asm/ftrace.h> #include <asm/page.h> @@ -15,6 +17,9 @@ EXPORT_SYMBOL(max_pfn); EXPORT_SYMBOL(min_low_pfn); #endif +/* Network checksum functions */ +EXPORT_SYMBOL(csum_partial); + /* TBI symbols */ EXPORT_SYMBOL(__TBI); EXPORT_SYMBOL(__TBIFindSeg); diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index 4f5726f1a55b..c396cd0b425f 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -20,6 +20,7 @@ #include <linux/memblock.h> #include <linux/mm.h> #include <linux/of_fdt.h> +#include <linux/of_platform.h> #include <linux/pfn.h> #include <linux/root_dev.h> #include <linux/sched.h> @@ -424,6 +425,9 @@ static int __init customize_machine(void) /* customizes platform devices, or adds new ones */ if (machine_desc->init_machine) machine_desc->init_machine(); + else + of_platform_populate(NULL, of_default_bus_match_table, NULL, + NULL); return 0; } arch_initcall(customize_machine); @@ -587,20 +591,20 @@ PTBI pTBI_get(unsigned int cpu) EXPORT_SYMBOL(pTBI_get); #if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU) -char capabilites[] = "dsp fpu"; +static char capabilities[] = "dsp fpu"; #elif defined(CONFIG_METAG_DSP) -char capabilites[] = "dsp"; +static char capabilities[] = "dsp"; #elif defined(CONFIG_METAG_FPU) -char capabilites[] = "fpu"; +static char capabilities[] = "fpu"; #else -char capabilites[] = ""; +static char capabilities[] = ""; #endif static struct ctl_table caps_kern_table[] = { { .procname = "capabilities", - .data = capabilites, - .maxlen = sizeof(capabilites), + .data = capabilities, + .maxlen = sizeof(capabilities), .mode = 0444, .proc_handler = proc_dostring, }, diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index f443ec9a7cbe..e413875cf6d2 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ #include <linux/atomic.h> +#include <linux/completion.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> @@ -62,6 +63,8 @@ static DEFINE_PER_CPU(struct ipi_data, ipi_data) = { static DEFINE_SPINLOCK(boot_lock); +static DECLARE_COMPLETION(cpu_running); + /* * "thread" is assumed to be a valid Meta hardware thread ID. */ @@ -235,20 +238,12 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) */ ret = boot_secondary(thread, idle); if (ret == 0) { - unsigned long timeout; - /* * CPU was successfully started, wait for it * to come online or time out. */ - timeout = jiffies + HZ; - while (time_before(jiffies, timeout)) { - if (cpu_online(cpu)) - break; - - udelay(10); - barrier(); - } + wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(1000)); if (!cpu_online(cpu)) ret = -EIO; @@ -276,7 +271,6 @@ static DECLARE_COMPLETION(cpu_killed); int __cpuexit __cpu_disable(void) { unsigned int cpu = smp_processor_id(); - struct task_struct *p; /* * Take this CPU offline. 
Once we clear this, we can't return, @@ -296,12 +290,7 @@ int __cpuexit __cpu_disable(void) flush_cache_all(); local_flush_tlb_all(); - read_lock(&tasklist_lock); - for_each_process(p) { - if (p->mm) - cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); - } - read_unlock(&tasklist_lock); + clear_tasks_mm_cpumask(cpu); return 0; } @@ -385,12 +374,7 @@ asmlinkage void secondary_start_kernel(void) setup_priv(); - /* - * Enable local interrupts. - */ - tbi_startup_interrupt(TBID_SIGNUM_TRT); notify_cpu_starting(cpu); - local_irq_enable(); pr_info("CPU%u (thread %u): Booted secondary processor\n", cpu, cpu_2_hwthread_id[cpu]); @@ -402,12 +386,13 @@ asmlinkage void secondary_start_kernel(void) * OK, now it's safe to let the boot CPU continue */ set_cpu_online(cpu, true); + complete(&cpu_running); /* - * Check for cache aliasing. - * Preemption is disabled + * Enable local interrupts. */ - check_for_cache_aliasing(cpu); + tbi_startup_interrupt(TBID_SIGNUM_TRT); + local_irq_enable(); /* * OK, it's off to the idle thread for us diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c index 17dc10733b2f..f1c8c53dace7 100644 --- a/arch/metag/kernel/time.c +++ b/arch/metag/kernel/time.c @@ -5,11 +5,21 @@ * */ -#include <linux/init.h> - #include <clocksource/metag_generic.h> +#include <linux/clk-provider.h> +#include <linux/init.h> +#include <asm/clock.h> void __init time_init(void) { +#ifdef CONFIG_COMMON_CLK + /* Init clocks from device tree */ + of_clk_init(NULL); +#endif + + /* Init meta clocks, particularly the core clock */ + init_metag_clocks(); + + /* Set up the timer clock sources */ metag_generic_timer_init(); } diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 2ceeaae5b199..c00ade0228ef 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -33,6 +33,7 @@ #include <asm/siginfo.h> #include <asm/traps.h> #include <asm/hwthread.h> +#include <asm/setup.h> #include <asm/switch.h> #include <asm/user_gateway.h> #include <asm/syscall.h> @@ -87,8 +88,8 @@ const char *trap_name(int trapno) static DEFINE_SPINLOCK(die_lock); -void die(const char *str, struct pt_regs *regs, long err, - unsigned long addr) +void __noreturn die(const char *str, struct pt_regs *regs, + long err, unsigned long addr) { static int die_counter; diff --git a/arch/metag/lib/checksum.c b/arch/metag/lib/checksum.c index 44d2e1913560..5d6a98a05e9d 100644 --- a/arch/metag/lib/checksum.c +++ b/arch/metag/lib/checksum.c @@ -124,7 +124,6 @@ __wsum csum_partial(const void *buff, int len, __wsum wsum) result += 1; return (__force __wsum)result; } -EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c index b5d3b2e7c160..a62285284ab8 100644 --- a/arch/metag/mm/cache.c +++ b/arch/metag/mm/cache.c @@ -45,7 +45,7 @@ static volatile u32 lnkget_testdata[16] __initdata __aligned(64); #define LNKGET_CONSTANT 0xdeadbeef -void __init metag_lnkget_probe(void) +static void __init metag_lnkget_probe(void) { int temp; long flags; diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index 2c75bf7357c5..8fddf46e6c62 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c @@ -224,8 +224,10 @@ do_sigbus: */ out_of_memory: up_read(&mm->mmap_sem); - if (user_mode(regs)) - do_group_exit(SIGKILL); + if (user_mode(regs)) { + pagefault_out_of_memory(); + return 1; + } no_context: /* Are we prepared to handle this kernel fault? 
*/ diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index d05b8455c44c..28813f164730 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -376,34 +376,21 @@ void __init paging_init(unsigned long mem_end) void __init mem_init(void) { - int nid; - #ifdef CONFIG_HIGHMEM unsigned long tmp; + + /* + * Explicitly reset zone->managed_pages because highmem pages are + * freed before calling free_all_bootmem(); + */ + reset_all_zones_managed_pages(); for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) free_highmem_page(pfn_to_page(tmp)); - num_physpages += totalhigh_pages; #endif /* CONFIG_HIGHMEM */ - for_each_online_node(nid) { - pg_data_t *pgdat = NODE_DATA(nid); - unsigned long node_pages = 0; - - num_physpages += pgdat->node_present_pages; - - if (pgdat->node_spanned_pages) - node_pages = free_all_bootmem_node(pgdat); - - totalram_pages += node_pages; - } - - pr_info("Memory: %luk/%luk available\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), - num_physpages << (PAGE_SHIFT - 10)); - + free_all_bootmem(); + mem_init_print_info(NULL); show_mem(0); - - return; } void free_initmem(void) @@ -414,7 +401,8 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index 3649a8b150c0..deaf45ab6429 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_FHANDLE=y @@ -81,6 +80,9 @@ CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_SLAB=y CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_INFO=y +CONFIG_KGDB=y +CONFIG_KGDB_TESTS=y +CONFIG_KGDB_KDB=y CONFIG_EARLY_PRINTK=y CONFIG_KEYS=y CONFIG_ENCRYPTED_KEYS=y diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 85a5ae8e9bd0..fd850879854d 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -168,7 +168,6 @@ extern int page_is_ram(unsigned long pfn); # else /* CONFIG_MMU */ # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) # define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) -# define VALID_PAGE(page) ((page - mem_map) < max_mapnr) # endif /* CONFIG_MMU */ # endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 6dece2d002dc..b14232b6878f 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,4 +38,7 @@ #define __ARCH_WANT_SYS_FORK #endif /* __ASSEMBLY__ */ + +#define __NR_syscalls 381 + #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h index 5f7fe7582f3a..20043b67d158 100644 --- a/arch/microblaze/include/uapi/asm/unistd.h +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -397,6 +397,4 @@ #define __NR_kcmp 379 #define __NR_finit_module 380 -#define __NR_syscalls 381 - #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c index 8adc92443100..09a5e8286137 100644 --- a/arch/microblaze/kernel/kgdb.c +++ b/arch/microblaze/kernel/kgdb.c @@ -141,7 +141,7 @@ void kgdb_arch_exit(void) /* * Global data */ -const struct kgdb_arch arch_kgdb_ops = { +struct kgdb_arch arch_kgdb_ops = { #ifdef 
__MICROBLAZEEL__ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */ #else diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index b38ae3acfeb4..74c7bcc1e82d 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -71,24 +71,17 @@ static void __init highmem_init(void) kmap_prot = PAGE_KERNEL; } -static unsigned long highmem_setup(void) +static void highmem_setup(void) { unsigned long pfn; - unsigned long reservedpages = 0; for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { struct page *page = pfn_to_page(pfn); /* FIXME not sure about */ - if (memblock_is_reserved(pfn << PAGE_SHIFT)) - continue; - free_highmem_page(page); - reservedpages++; + if (!memblock_is_reserved(pfn << PAGE_SHIFT)) + free_highmem_page(page); } - pr_info("High memory: %luk\n", - totalhigh_pages << (PAGE_SHIFT-10)); - - return reservedpages; } #endif /* CONFIG_HIGHMEM */ @@ -167,13 +160,12 @@ void __init setup_memory(void) * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) * max_low_pfn * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) - * num_physpages - number of all pages */ /* memory start is from the kernel end (aligned) to higher addr */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ /* RAM is assumed contiguous */ - num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; + max_mapnr = memory_size >> PAGE_SHIFT; max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; @@ -235,57 +227,26 @@ void __init setup_memory(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } void __init mem_init(void) { - pg_data_t *pgdat; - unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; - high_memory = (void *)__va(memory_start + lowmem_size - 1); /* this will put all memory onto the freelists */ - totalram_pages += free_all_bootmem(); - - for_each_online_pgdat(pgdat) { - unsigned long i; - struct page *page; - - for (i = 0; i < pgdat->node_spanned_pages; i++) { - if (!pfn_valid(pgdat->node_start_pfn + i)) - continue; - page = pgdat_page_nr(pgdat, i); - if (PageReserved(page)) - reservedpages++; - } - } - + free_all_bootmem(); #ifdef CONFIG_HIGHMEM - reservedpages -= highmem_setup(); + highmem_setup(); #endif - codesize = (unsigned long)&_sdata - (unsigned long)&_stext; - datasize = (unsigned long)&_edata - (unsigned long)&_sdata; - initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; - bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; - - pr_info("Memory: %luk/%luk available (%luk kernel code, ", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10); - pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n", - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - bsssize >> 10, - initsize >> 10); - + mem_init_print_info(NULL); #ifdef CONFIG_MMU pr_info("Kernel virtual memory layout:\n"); pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 4b597d91a8d5..d9d81c219253 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -30,7 +30,6 @@ platforms += sibyte platforms += sni platforms += txx9 platforms += vr41xx 
-platforms += wrppmc # include the platform specific files include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platforms)) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e433b90507fb..4758a8fd3e99 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1,6 +1,7 @@ config MIPS bool default y + select HAVE_CONTEXT_TRACKING select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE select HAVE_OPROFILE @@ -27,6 +28,7 @@ config MIPS select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP select HAVE_ARCH_JUMP_LABEL select ARCH_WANT_IPC_PARSE_VERSION select IRQ_FORCED_THREADING @@ -42,12 +44,10 @@ config MIPS select MODULES_USE_ELF_REL if MODULES select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS + select HAVE_DEBUG_STACKOVERFLOW menu "Machine selection" -config ZONE_DMA - bool - choice prompt "System type" default SGI_IP22 @@ -123,11 +123,14 @@ config BCM47XX config BCM63XX bool "Broadcom BCM63XX based boards" + select BOOT_RAW select CEVT_R4K select CSRC_R4K select DMA_NONCOHERENT select IRQ_CPU select SYS_HAS_CPU_MIPS32_R1 + select SYS_HAS_CPU_BMIPS4350 if !BCM63XX_CPU_6338 && !BCM63XX_CPU_6345 && !BCM63XX_CPU_6348 + select NR_CPUS_DEFAULT_2 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -340,7 +343,6 @@ config MIPS_SEAD3 select DMA_NONCOHERENT select IRQ_CPU select IRQ_GIC - select MIPS_CPU_SCACHE select MIPS_MSC select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_CPU_MIPS32_R2 @@ -419,7 +421,6 @@ config POWERTV select CSRC_POWERTV select DMA_NONCOHERENT select HW_HAS_PCI - select SYS_HAS_EARLY_PRINTK select SYS_HAS_CPU_MIPS32_R2 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN @@ -712,46 +713,8 @@ config MIKROTIK_RB532 Support the Mikrotik(tm) RouterBoard 532 series, based on the IDT RC32434 SoC. -config WR_PPMC - bool "Wind River PPMC board" - select CEVT_R4K - select CSRC_R4K - select IRQ_CPU - select BOOT_ELF32 - select DMA_NONCOHERENT - select HW_HAS_PCI - select PCI_GT64XXX_PCI0 - select SWAP_IO_SPACE - select SYS_HAS_CPU_MIPS32_R1 - select SYS_HAS_CPU_MIPS32_R2 - select SYS_HAS_CPU_MIPS64_R1 - select SYS_HAS_CPU_NEVADA - select SYS_HAS_CPU_RM7000 - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_64BIT_KERNEL - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_LITTLE_ENDIAN - help - This enables support for the Wind River MIPS32 4KC PPMC evaluation - board, which is based on GT64120 bridge chip. - -config CAVIUM_OCTEON_SIMULATOR - bool "Cavium Networks Octeon Simulator" - select CEVT_R4K - select 64BIT_PHYS_ADDR - select DMA_COHERENT - select SYS_SUPPORTS_64BIT_KERNEL - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_HOTPLUG_CPU - select SYS_HAS_CPU_CAVIUM_OCTEON - select HOLES_IN_ZONE - help - The Octeon simulator is software performance model of the Cavium - Octeon Processor. It supports simulating Octeon processors on x86 - hardware. - -config CAVIUM_OCTEON_REFERENCE_BOARD - bool "Cavium Networks Octeon reference board" +config CAVIUM_OCTEON_SOC + bool "Cavium Networks Octeon SoC based boards" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT @@ -805,6 +768,8 @@ config NLM_XLR_BOARD select SYS_HAS_EARLY_PRINTK select USB_ARCH_HAS_OHCI if USB_SUPPORT select USB_ARCH_HAS_EHCI if USB_SUPPORT + select SYS_SUPPORTS_ZBOOT + select SYS_SUPPORTS_ZBOOT_UART16550 help Support for systems based on Netlogic XLR and XLS processors. Say Y here if you have a XLR or XLS based board. 
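A pattern worth calling out amid these Kconfig changes: the per-architecture mm/init.c hunks earlier in this diff (hexagon, ia64, m32r, m68k, metag, microblaze) all reduce mem_init() to roughly the same shape. The following is an illustrative composite rather than the code of any one port; max_low_pfn stands in for whichever page-frame limit the given architecture tracks.

#include <linux/bootmem.h>	/* free_all_bootmem(), max_low_pfn */
#include <linux/init.h>
#include <linux/mm.h>		/* set_max_mapnr(), mem_init_print_info(), high_memory */

void __init mem_init(void)
{
	set_max_mapnr(max_low_pfn);	/* replaces open-coded max_mapnr/num_physpages sums */
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	free_all_bootmem();		/* its return value is no longer summed by hand */
	mem_init_print_info(NULL);	/* prints the common "Memory: ... available" banner */
}

The companion cleanups visible in the same hunks switch the freeing helpers to the new conventions, e.g. free_reserved_area((void *)start, (void *)end, -1, "initrd") and free_initmem_default(-1).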
@@ -831,6 +796,8 @@ config NLM_XLP_BOARD select SYNC_R4K select SYS_HAS_EARLY_PRINTK select USE_OF + select SYS_SUPPORTS_ZBOOT + select SYS_SUPPORTS_ZBOOT_UART16550 help This board is based on Netlogic XLP Processor. Say Y here if you have a XLP based board. @@ -1030,7 +997,6 @@ config CPU_BIG_ENDIAN config CPU_LITTLE_ENDIAN bool "Little endian" depends on SYS_SUPPORTS_LITTLE_ENDIAN - help endchoice @@ -1963,7 +1929,7 @@ config MIPS_MT_FPAFF config MIPS_VPE_LOADER bool "VPE loader support." - depends on SYS_SUPPORTS_MULTITHREADING + depends on SYS_SUPPORTS_MULTITHREADING && MODULES select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select MIPS_MT @@ -2381,6 +2347,19 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. +config CC_STACKPROTECTOR + bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" + help + This option turns on the -fstack-protector GCC feature. This + feature puts, at the beginning of functions, a canary value on + the stack just before the return address, and validates + the value just before actually returning. Stack based buffer + overflows (that need to overwrite this return address) now also + overwrite the canary, which gets detected and the attack is then + neutralized via a kernel panic. + + This feature requires gcc version 4.2 or above. + config USE_OF bool select OF @@ -2412,7 +2391,6 @@ config PCI bool "Support for PCI controller" depends on HW_HAS_PCI select PCI_DOMAINS - select GENERIC_PCI_IOMAP select NO_GENERIC_PCI_IOPORT_MAP help Find out whether you have a PCI motherboard. PCI is the name of a @@ -2478,6 +2456,9 @@ config I8253 select CLKEVT_I8253 select MIPS_EXTERNAL_TIMER +config ZONE_DMA + bool + config ZONE_DMA32 bool diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 5a43aa0798ca..37871f0de15e 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -67,15 +67,6 @@ config CMDLINE_OVERRIDE Normally, you will choose 'N' here. -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit(2GB on MIPS). The debugging option - provides another way to check stack overflow happened on kernel mode - stack usually caused by nested interruption. 
- config SMTC_IDLE_HOOK_DEBUG bool "Enable additional debug checks before going into CPU idle loop" depends on DEBUG_KERNEL && MIPS_MT_SMTC diff --git a/arch/mips/Makefile b/arch/mips/Makefile index dd58a04ef4bc..37f9ef324f2f 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -227,6 +227,10 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) LDFLAGS += -m $(ld-emul) +ifdef CONFIG_CC_STACKPROTECTOR + KBUILD_CFLAGS += -fstack-protector +endif + ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c index 479dd4b1d0d2..07eac58c3641 100644 --- a/arch/mips/ath79/mach-ap136.c +++ b/arch/mips/ath79/mach-ap136.c @@ -132,7 +132,7 @@ static void __init ap136_pci_init(u8 *eeprom) ath79_register_pci(); } #else -static inline void ap136_pci_init(void) {} +static inline void ap136_pci_init(u8 *eeprom) {} #endif /* CONFIG_PCI */ static void __init ap136_setup(void) diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig index 5639662fd503..b78306ce56c7 100644 --- a/arch/mips/bcm63xx/Kconfig +++ b/arch/mips/bcm63xx/Kconfig @@ -1,6 +1,10 @@ menu "CPU support" depends on BCM63XX +config BCM63XX_CPU_3368 + bool "support 3368 CPU" + select HW_HAS_PCI + config BCM63XX_CPU_6328 bool "support 6328 CPU" select HW_HAS_PCI @@ -8,14 +12,9 @@ config BCM63XX_CPU_6328 config BCM63XX_CPU_6338 bool "support 6338 CPU" select HW_HAS_PCI - select USB_ARCH_HAS_OHCI - select USB_OHCI_BIG_ENDIAN_DESC - select USB_OHCI_BIG_ENDIAN_MMIO config BCM63XX_CPU_6345 bool "support 6345 CPU" - select USB_OHCI_BIG_ENDIAN_DESC - select USB_OHCI_BIG_ENDIAN_MMIO config BCM63XX_CPU_6348 bool "support 6348 CPU" diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index a9505c4867e8..5b974eb125fc 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -28,11 +28,47 @@ #include <bcm63xx_dev_usb_usbd.h> #include <board_bcm963xx.h> +#include <uapi/linux/bcm933xx_hcs.h> + #define PFX "board_bcm963xx: " +#define HCS_OFFSET_128K 0x20000 + static struct board_info board; /* + * known 3368 boards + */ +#ifdef CONFIG_BCM63XX_CPU_3368 +static struct board_info __initdata board_cvg834g = { + .name = "CVG834G_E15R3921", + .expected_cpu_id = 0x3368, + + .has_uart0 = 1, + .has_uart1 = 1, + + .has_enet0 = 1, + .has_pci = 1, + + .enet0 = { + .has_phy = 1, + .use_internal_phy = 1, + }, + + .leds = { + { + .name = "CVG834G:green:power", + .gpio = 37, + .default_trigger= "default-on", + }, + }, + + .ephy_reset_gpio = 36, + .ephy_reset_gpio_flags = GPIOF_INIT_HIGH, +}; +#endif + +/* * known 6328 boards */ #ifdef CONFIG_BCM63XX_CPU_6328 @@ -639,6 +675,9 @@ static struct board_info __initdata board_DWVS0 = { * all boards */ static const struct board_info __initconst *bcm963xx_boards[] = { +#ifdef CONFIG_BCM63XX_CPU_3368 + &board_cvg834g, +#endif #ifdef CONFIG_BCM63XX_CPU_6328 &board_96328avng, #endif @@ -722,8 +761,9 @@ void __init board_prom_init(void) unsigned int i; u8 *boot_addr, *cfe; char cfe_version[32]; - char *board_name; + char *board_name = NULL; u32 val; + struct bcm_hcs *hcs; /* read base address of boot chip select (0) * 6328/6362 do not have MPI but boot from a fixed address @@ -747,7 +787,12 @@ void __init board_prom_init(void) bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); - board_name = bcm63xx_nvram_get_name(); + if (BCMCPU_IS_3368()) { + hcs = (struct 
bcm_hcs *)boot_addr; + board_name = hcs->filename; + } else { + board_name = bcm63xx_nvram_get_name(); + } /* find board by name */ for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) { if (strncmp(board_name, bcm963xx_boards[i]->name, 16)) @@ -845,6 +890,10 @@ int __init board_register_devices(void) !bcm63xx_nvram_get_mac_address(board.enet1.mac_addr)) bcm63xx_enet_register(1, &board.enet1); + if (board.has_enetsw && + !bcm63xx_nvram_get_mac_address(board.enetsw.mac_addr)) + bcm63xx_enetsw_register(&board.enetsw); + if (board.has_usbd) bcm63xx_usbd_register(&board.usbd); @@ -873,5 +922,9 @@ int __init board_register_devices(void) platform_device_register(&bcm63xx_gpio_leds); + if (board.ephy_reset_gpio && board.ephy_reset_gpio_flags) + gpio_request_one(board.ephy_reset_gpio, + board.ephy_reset_gpio_flags, "ephy-reset"); + return 0; } diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index c726a97fc798..43da4ae04cc2 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -84,7 +84,7 @@ static void enetx_set(struct clk *clk, int enable) else clk_disable_unlocked(&clk_enet_misc); - if (BCMCPU_IS_6358()) { + if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) { u32 mask; if (clk->id == 0) @@ -110,9 +110,8 @@ static struct clk clk_enet1 = { */ static void ephy_set(struct clk *clk, int enable) { - if (!BCMCPU_IS_6358()) - return; - bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable); + if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) + bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable); } @@ -155,9 +154,10 @@ static struct clk clk_enetsw = { */ static void pcm_set(struct clk *clk, int enable) { - if (!BCMCPU_IS_6358()) - return; - bcm_hwclock_set(CKCTL_6358_PCM_EN, enable); + if (BCMCPU_IS_3368()) + bcm_hwclock_set(CKCTL_3368_PCM_EN, enable); + if (BCMCPU_IS_6358()) + bcm_hwclock_set(CKCTL_6358_PCM_EN, enable); } static struct clk clk_pcm = { @@ -211,7 +211,7 @@ static void spi_set(struct clk *clk, int enable) mask = CKCTL_6338_SPI_EN; else if (BCMCPU_IS_6348()) mask = CKCTL_6348_SPI_EN; - else if (BCMCPU_IS_6358()) + else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) mask = CKCTL_6358_SPI_EN; else if (BCMCPU_IS_6362()) mask = CKCTL_6362_SPI_EN; @@ -318,6 +318,18 @@ unsigned long clk_get_rate(struct clk *clk) EXPORT_SYMBOL(clk_get_rate); +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} +EXPORT_SYMBOL_GPL(clk_set_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} +EXPORT_SYMBOL_GPL(clk_round_rate); + struct clk *clk_get(struct device *dev, const char *id) { if (!strcmp(id, "enet0")) @@ -338,7 +350,7 @@ struct clk *clk_get(struct device *dev, const char *id) return &clk_xtm; if (!strcmp(id, "periph")) return &clk_periph; - if (BCMCPU_IS_6358() && !strcmp(id, "pcm")) + if ((BCMCPU_IS_3368() || BCMCPU_IS_6358()) && !strcmp(id, "pcm")) return &clk_pcm; if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec")) return &clk_ipsec; diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index 79fe32df5e96..7e17374a9ae8 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c @@ -29,6 +29,14 @@ static u8 bcm63xx_cpu_rev; static unsigned int bcm63xx_cpu_freq; static unsigned int bcm63xx_memory_size; +static const unsigned long bcm3368_regs_base[] = { + __GEN_CPU_REGS_TABLE(3368) +}; + +static const int bcm3368_irqs[] = { + __GEN_CPU_IRQ_TABLE(3368) +}; + static const unsigned long bcm6328_regs_base[] = { __GEN_CPU_REGS_TABLE(6328) }; @@ -116,6 +124,9 @@ unsigned int bcm63xx_get_memory_size(void) static unsigned int 
detect_cpu_clock(void) { switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + return 300000000; + case BCM6328_CPU_ID: { unsigned int tmp, mips_pll_fcvo; @@ -266,7 +277,7 @@ static unsigned int detect_memory_size(void) banks = (val & SDRAM_CFG_BANK_MASK) ? 2 : 1; } - if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) { val = bcm_memc_readl(MEMC_CFG_REG); rows = (val & MEMC_CFG_ROW_MASK) >> MEMC_CFG_ROW_SHIFT; cols = (val & MEMC_CFG_COL_MASK) >> MEMC_CFG_COL_SHIFT; @@ -302,10 +313,17 @@ void __init bcm63xx_cpu_init(void) chipid_reg = BCM_6345_PERF_BASE; break; case CPU_BMIPS4350: - if ((read_c0_prid() & 0xf0) == 0x10) + switch ((read_c0_prid() & 0xff)) { + case 0x04: + chipid_reg = BCM_3368_PERF_BASE; + break; + case 0x10: chipid_reg = BCM_6345_PERF_BASE; - else + break; + default: chipid_reg = BCM_6368_PERF_BASE; + break; + } break; } @@ -322,6 +340,10 @@ void __init bcm63xx_cpu_init(void) bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT; switch (bcm63xx_cpu_id) { + case BCM3368_CPU_ID: + bcm63xx_regs_base = bcm3368_regs_base; + bcm63xx_irqs = bcm3368_irqs; + break; case BCM6328_CPU_ID: bcm63xx_regs_base = bcm6328_regs_base; bcm63xx_irqs = bcm6328_irqs; diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 39c23366c5c7..52bc01df9bfe 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -9,16 +9,60 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> +#include <linux/export.h> #include <bcm63xx_dev_enet.h> #include <bcm63xx_io.h> #include <bcm63xx_regs.h> +#ifdef BCMCPU_RUNTIME_DETECT +static const unsigned long bcm6348_regs_enetdmac[] = { + [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG, + [ENETDMAC_IR] = ENETDMAC_IR_REG, + [ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG, + [ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG, +}; + +static const unsigned long bcm6345_regs_enetdmac[] = { + [ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG, + [ENETDMAC_IR] = ENETDMA_6345_IR_REG, + [ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG, + [ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG, + [ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG, + [ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG, + [ENETDMAC_FC] = ENETDMA_6345_FC_REG, + [ENETDMAC_LEN] = ENETDMA_6345_LEN_REG, +}; + +const unsigned long *bcm63xx_regs_enetdmac; +EXPORT_SYMBOL(bcm63xx_regs_enetdmac); + +static __init void bcm63xx_enetdmac_regs_init(void) +{ + if (BCMCPU_IS_6345()) + bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac; + else + bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac; +} +#else +static __init void bcm63xx_enetdmac_regs_init(void) { } +#endif + static struct resource shared_res[] = { { .start = -1, /* filled at runtime */ .end = -1, /* filled at runtime */ .flags = IORESOURCE_MEM, }, + { + .start = -1, /* filled at runtime */ + .end = -1, /* filled at runtime */ + .flags = IORESOURCE_MEM, + }, + { + .start = -1, /* filled at runtime */ + .end = -1, /* filled at runtime */ + .flags = IORESOURCE_MEM, + }, }; static struct platform_device bcm63xx_enet_shared_device = { @@ -94,6 +138,71 @@ static struct platform_device bcm63xx_enet1_device = { }, }; +static struct resource enetsw_res[] = { + { + /* start & end filled at runtime */ + .flags = IORESOURCE_MEM, + }, + { + /* start filled at runtime */ + .flags = IORESOURCE_IRQ, + }, + { + /* start filled at runtime */ + .flags = IORESOURCE_IRQ, + }, +}; + +static struct bcm63xx_enetsw_platform_data enetsw_pd; + +static struct platform_device bcm63xx_enetsw_device = { 
+ .name = "bcm63xx_enetsw", + .num_resources = ARRAY_SIZE(enetsw_res), + .resource = enetsw_res, + .dev = { + .platform_data = &enetsw_pd, + }, +}; + +static int __init register_shared(void) +{ + int ret, chan_count; + + if (shared_device_registered) + return 0; + + bcm63xx_enetdmac_regs_init(); + + shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); + shared_res[0].end = shared_res[0].start; + if (BCMCPU_IS_6345()) + shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1; + else + shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; + + if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) + chan_count = 32; + else if (BCMCPU_IS_6345()) + chan_count = 8; + else + chan_count = 16; + + shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC); + shared_res[1].end = shared_res[1].start; + shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1; + + shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS); + shared_res[2].end = shared_res[2].start; + shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1; + + ret = platform_device_register(&bcm63xx_enet_shared_device); + if (ret) + return ret; + shared_device_registered = 1; + + return 0; +} + int __init bcm63xx_enet_register(int unit, const struct bcm63xx_enet_platform_data *pd) { @@ -104,22 +213,12 @@ int __init bcm63xx_enet_register(int unit, if (unit > 1) return -ENODEV; - if (unit == 1 && BCMCPU_IS_6338()) + if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345())) return -ENODEV; - if (!shared_device_registered) { - shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); - shared_res[0].end = shared_res[0].start; - if (BCMCPU_IS_6338()) - shared_res[0].end += (RSET_ENETDMA_SIZE / 2) - 1; - else - shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; - - ret = platform_device_register(&bcm63xx_enet_shared_device); - if (ret) - return ret; - shared_device_registered = 1; - } + ret = register_shared(); + if (ret) + return ret; if (unit == 0) { enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0); @@ -155,8 +254,62 @@ int __init bcm63xx_enet_register(int unit, dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY); } + dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK; + dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK; + if (BCMCPU_IS_6345()) { + dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK; + dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK; + dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK; + dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK; + dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK; + dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH; + dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT; + } else { + dpd->dma_has_sram = true; + dpd->dma_chan_width = ENETDMA_CHAN_WIDTH; + } + ret = platform_device_register(pdev); if (ret) return ret; return 0; } + +int __init +bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd) +{ + int ret; + + if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368()) + return -ENODEV; + + ret = register_shared(); + if (ret) + return ret; + + enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW); + enetsw_res[0].end = enetsw_res[0].start; + enetsw_res[0].end += RSET_ENETSW_SIZE - 1; + enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0); + enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0); + if (!enetsw_res[2].start) + enetsw_res[2].start = -1; + + memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd)); + + if (BCMCPU_IS_6328()) + enetsw_pd.num_ports = ENETSW_PORTS_6328; + else if (BCMCPU_IS_6362() || 
BCMCPU_IS_6368()) + enetsw_pd.num_ports = ENETSW_PORTS_6368; + + enetsw_pd.dma_has_sram = true; + enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH; + enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK; + enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK; + + ret = platform_device_register(&bcm63xx_enetsw_device); + if (ret) + return ret; + + return 0; +} diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c index 588d1ec622e4..172dd8397178 100644 --- a/arch/mips/bcm63xx/dev-flash.c +++ b/arch/mips/bcm63xx/dev-flash.c @@ -71,6 +71,7 @@ static int __init bcm63xx_detect_flash_type(void) case BCM6348_CPU_ID: /* no way to auto detect so assume parallel */ return BCM63XX_FLASH_TYPE_PARALLEL; + case BCM3368_CPU_ID: case BCM6358_CPU_ID: val = bcm_gpio_readl(GPIO_STRAPBUS_REG); if (val & STRAPBUS_6358_BOOT_SEL_PARALLEL) diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c index 3065bb61820d..d12daed749bc 100644 --- a/arch/mips/bcm63xx/dev-spi.c +++ b/arch/mips/bcm63xx/dev-spi.c @@ -37,7 +37,8 @@ static __init void bcm63xx_spi_regs_init(void) { if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) bcm63xx_regs_spi = bcm6348_regs_spi; - if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || + BCMCPU_IS_6362() || BCMCPU_IS_6368()) bcm63xx_regs_spi = bcm6358_regs_spi; } #else @@ -87,7 +88,8 @@ int __init bcm63xx_spi_register(void) spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH; } - if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) { + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6362() || + BCMCPU_IS_6368()) { spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c index d6e42c608325..3bc7f3bfc9ad 100644 --- a/arch/mips/bcm63xx/dev-uart.c +++ b/arch/mips/bcm63xx/dev-uart.c @@ -54,7 +54,8 @@ int __init bcm63xx_uart_register(unsigned int id) if (id >= ARRAY_SIZE(bcm63xx_uart_devices)) return -ENODEV; - if (id == 1 && (!BCMCPU_IS_6358() && !BCMCPU_IS_6368())) + if (id == 1 && (!BCMCPU_IS_3368() && !BCMCPU_IS_6358() && + !BCMCPU_IS_6368())) return -ENODEV; if (id == 0) { diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index c0ab3887f42e..1525f8a3841b 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -27,6 +27,17 @@ static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused; static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused; #ifndef BCMCPU_RUNTIME_DETECT +#ifdef CONFIG_BCM63XX_CPU_3368 +#define irq_stat_reg PERF_IRQSTAT_3368_REG +#define irq_mask_reg PERF_IRQMASK_3368_REG +#define irq_bits 32 +#define is_ext_irq_cascaded 0 +#define ext_irq_start 0 +#define ext_irq_end 0 +#define ext_irq_count 4 +#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_3368 +#define ext_irq_cfg_reg2 0 +#endif #ifdef CONFIG_BCM63XX_CPU_6328 #define irq_stat_reg PERF_IRQSTAT_6328_REG #define irq_mask_reg PERF_IRQMASK_6328_REG @@ -140,6 +151,13 @@ static void bcm63xx_init_irq(void) irq_mask_addr = bcm63xx_regset_address(RSET_PERF); switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + irq_stat_addr += PERF_IRQSTAT_3368_REG; + irq_mask_addr += PERF_IRQMASK_3368_REG; + irq_bits = 32; + ext_irq_count = 4; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; + break; case BCM6328_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6328_REG; irq_mask_addr += PERF_IRQMASK_6328_REG; @@ -294,6 +312,10 @@ asmlinkage void 
plat_irq_dispatch(void) if (cause & CAUSEF_IP7) do_IRQ(7); + if (cause & CAUSEF_IP0) + do_IRQ(0); + if (cause & CAUSEF_IP1) + do_IRQ(1); if (cause & CAUSEF_IP2) dispatch_internal(); if (!is_ext_irq_cascaded) { @@ -475,6 +497,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq); break; + case BCM3368_CPU_ID: case BCM6328_CPU_ID: case BCM6338_CPU_ID: case BCM6345_CPU_ID: diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index a4b8864f9307..e652e578a679 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -42,6 +42,7 @@ void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; + u8 hcs_mac_addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xff, 0xff, 0xff }; /* extract nvram data */ memcpy(&nvram, addr, sizeof(nvram)); @@ -62,6 +63,15 @@ void __init bcm63xx_nvram_init(void *addr) if (crc != expected_crc) pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", expected_crc, crc); + + /* Cable modems have a different NVRAM which is embedded in the eCos + * firmware and not easily extractible, give at least a MAC address + * pool. + */ + if (BCMCPU_IS_3368()) { + memcpy(nvram.mac_addr_base, hcs_mac_addr, ETH_ALEN); + nvram.mac_addr_count = 2; + } } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index fd698087fbfd..8ac4e095e68e 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -8,7 +8,11 @@ #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/smp.h> #include <asm/bootinfo.h> +#include <asm/bmips.h> +#include <asm/smp-ops.h> +#include <asm/mipsregs.h> #include <bcm63xx_board.h> #include <bcm63xx_cpu.h> #include <bcm63xx_io.h> @@ -26,7 +30,9 @@ void __init prom_init(void) bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG); /* disable all hardware blocks clock for now */ - if (BCMCPU_IS_6328()) + if (BCMCPU_IS_3368()) + mask = CKCTL_3368_ALL_SAFE_EN; + else if (BCMCPU_IS_6328()) mask = CKCTL_6328_ALL_SAFE_EN; else if (BCMCPU_IS_6338()) mask = CKCTL_6338_ALL_SAFE_EN; @@ -52,6 +58,47 @@ void __init prom_init(void) /* do low level board init */ board_prom_init(); + + if (IS_ENABLED(CONFIG_CPU_BMIPS4350) && IS_ENABLED(CONFIG_SMP)) { + /* set up SMP */ + register_smp_ops(&bmips_smp_ops); + + /* + * BCM6328 might not have its second CPU enabled, while BCM6358 + * needs special handling for its shared TLB, so disable SMP + * for now. + */ + if (BCMCPU_IS_6328()) { + reg = bcm_readl(BCM_6328_OTP_BASE + + OTP_USER_BITS_6328_REG(3)); + + if (reg & OTP_6328_REG3_TP1_DISABLED) + bmips_smp_enabled = 0; + } else if (BCMCPU_IS_6358()) { + bmips_smp_enabled = 0; + } + + if (!bmips_smp_enabled) + return; + + /* + * The bootloader has set up the CPU1 reset vector at + * 0xa000_0200. + * This conflicts with the special interrupt vector (IV). + * The bootloader has also set up CPU1 to respond to the wrong + * IPI interrupt. + * Here we will start up CPU1 in the background and ask it to + * reconfigure itself then go back to sleep. 
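 * In short: the 0x20-byte bmips_smp_movevec stub is copied over the
 * CPU1 reset vector, software interrupt 0 (C_SW0) is raised to wake
 * CPU1, the stub moves CPU1's vectors and IPI wiring out of the way,
 * and CPU1 then parks again until the generic BMIPS SMP code releases
 * it later in boot (that hand-off is assumed here, not shown in this
 * hunk).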
+ */ + memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20); + __sync(); + set_c0_cause(C_SW0); + cpumask_set_cpu(1, &bmips_booted_mask); + + /* + * FIXME: we really should have some sort of hazard barrier here + */ + } } void __init prom_free_prom_memory(void) diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index 317931c6cf58..acbeb1fe7c57 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -30,6 +30,19 @@ [BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \ [BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT, +#define BCM3368_RESET_SPI SOFTRESET_3368_SPI_MASK +#define BCM3368_RESET_ENET SOFTRESET_3368_ENET_MASK +#define BCM3368_RESET_USBH 0 +#define BCM3368_RESET_USBD SOFTRESET_3368_USBS_MASK +#define BCM3368_RESET_DSL 0 +#define BCM3368_RESET_SAR 0 +#define BCM3368_RESET_EPHY SOFTRESET_3368_EPHY_MASK +#define BCM3368_RESET_ENETSW 0 +#define BCM3368_RESET_PCM SOFTRESET_3368_PCM_MASK +#define BCM3368_RESET_MPI SOFTRESET_3368_MPI_MASK +#define BCM3368_RESET_PCIE 0 +#define BCM3368_RESET_PCIE_EXT 0 + #define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK #define BCM6328_RESET_ENET 0 #define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK @@ -117,6 +130,10 @@ /* * core reset bits */ +static const u32 bcm3368_reset_bits[] = { + __GEN_RESET_BITS_TABLE(3368) +}; + static const u32 bcm6328_reset_bits[] = { __GEN_RESET_BITS_TABLE(6328) }; @@ -146,7 +163,10 @@ static int reset_reg; static int __init bcm63xx_reset_bits_init(void) { - if (BCMCPU_IS_6328()) { + if (BCMCPU_IS_3368()) { + reset_reg = PERF_SOFTRESET_6358_REG; + bcm63xx_reset_bits = bcm3368_reset_bits; + } else if (BCMCPU_IS_6328()) { reset_reg = PERF_SOFTRESET_6328_REG; bcm63xx_reset_bits = bcm6328_reset_bits; } else if (BCMCPU_IS_6338()) { @@ -170,6 +190,13 @@ static int __init bcm63xx_reset_bits_init(void) } #else +#ifdef CONFIG_BCM63XX_CPU_3368 +static const u32 bcm63xx_reset_bits[] = { + __GEN_RESET_BITS_TABLE(3368) +}; +#define reset_reg PERF_SOFTRESET_6358_REG +#endif + #ifdef CONFIG_BCM63XX_CPU_6328 static const u32 bcm63xx_reset_bits[] = { __GEN_RESET_BITS_TABLE(6328) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 24a24445db64..6660c7ddf87b 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -68,6 +68,9 @@ void bcm63xx_machine_reboot(void) /* mask and clear all external irq */ switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + perf_regs[0] = PERF_EXTIRQ_CFG_REG_3368; + break; case BCM6328_CPU_ID: perf_regs[0] = PERF_EXTIRQ_CFG_REG_6328; break; diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index bbaa1d4beb6d..bb1dbf4abb9d 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -18,6 +18,8 @@ BOOT_HEAP_SIZE := 0x400000 # Disable Function Tracer KBUILD_CFLAGS := $(shell echo $(KBUILD_CFLAGS) | sed -e "s/-pg//") +KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) + KBUILD_CFLAGS := $(LINUXINCLUDE) $(KBUILD_CFLAGS) -D__KERNEL__ \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c index 1c7b739b6a1d..c01d343ce6ad 100644 --- a/arch/mips/boot/compressed/uart-16550.c +++ b/arch/mips/boot/compressed/uart-16550.c @@ -23,23 +23,39 @@ #define PORT(offset) (UART0_BASE + (4 * offset)) #endif +#ifdef CONFIG_CPU_XLR +#define UART0_BASE 0x1EF14000 +#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset)) +#define IOTYPE unsigned int 
+#endif + +#ifdef CONFIG_CPU_XLP +#define UART0_BASE 0x18030100 +#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset)) +#define IOTYPE unsigned int +#endif + +#ifndef IOTYPE +#define IOTYPE char +#endif + #ifndef PORT #error please define the serial port address for your own machine #endif static inline unsigned int serial_in(int offset) { - return *((char *)PORT(offset)); + return *((volatile IOTYPE *)PORT(offset)) & 0xFF; } static inline void serial_out(int offset, int value) { - *((char *)PORT(offset)) = value; + *((volatile IOTYPE *)PORT(offset)) = value & 0xFF; } void putc(char c) { - int timeout = 1024; + int timeout = 1000000; while (((serial_in(UART_LSR) & UART_LSR_THRE) == 0) && (timeout-- > 0)) ; diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 75a6df7fd265..227705d9d5ae 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -10,6 +10,10 @@ config CAVIUM_CN63XXP1 non-CN63XXP1 hardware, so it is recommended to select "n" unless it is known the workarounds are needed. +endif # CPU_CAVIUM_OCTEON + +if CAVIUM_OCTEON_SOC + config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" default "n" @@ -19,17 +23,6 @@ config CAVIUM_OCTEON_2ND_KERNEL with this option to be run at the same time as one built without this option. -config CAVIUM_OCTEON_HW_FIX_UNALIGNED - bool "Enable hardware fixups of unaligned loads and stores" - default "y" - help - Configure the Octeon hardware to automatically fix unaligned loads - and stores. Normally unaligned accesses are fixed using a kernel - exception handler. This option enables the hardware automatic fixups, - which requires only an extra 3 cycles. Disable this option if you - are running code that relies on address exceptions on unaligned - accesses. - config CAVIUM_OCTEON_CVMSEG_SIZE int "Number of L1 cache lines reserved for CVMSEG memory" range 0 54 @@ -103,4 +96,4 @@ config OCTEON_ILM To compile this driver as a module, choose M here. 
The module will be called octeon-ilm -endif # CPU_CAVIUM_OCTEON +endif # CAVIUM_OCTEON_SOC diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index 3595affb9772..4e952043c922 100644 --- a/arch/mips/cavium-octeon/Makefile +++ b/arch/mips/cavium-octeon/Makefile @@ -12,11 +12,12 @@ CFLAGS_octeon-platform.o = -I$(src)/../../../scripts/dtc/libfdt CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt -obj-y := cpu.o setup.o serial.o octeon-platform.o octeon-irq.o csrc-octeon.o -obj-y += dma-octeon.o flash_setup.o +obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o +obj-y += dma-octeon.o obj-y += octeon-memcpy.o obj-y += executive/ +obj-$(CONFIG_MTD) += flash_setup.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform index 1e43ccf1a792..8a301cb12d68 100644 --- a/arch/mips/cavium-octeon/Platform +++ b/arch/mips/cavium-octeon/Platform @@ -1,11 +1,11 @@ # # Cavium Octeon # -platform-$(CONFIG_CPU_CAVIUM_OCTEON) += cavium-octeon/ -cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += \ +platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/ +cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \ -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL -load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff84100000 +load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff84100000 else -load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff81100000 +load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000 endif diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index 7c6497781895..0a1283ce47f5 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -181,6 +181,11 @@ int cvmx_helper_board_get_mii_address(int ipd_port) return ipd_port - 16 + 4; else return -1; + case CVMX_BOARD_TYPE_UBNT_E100: + if (ipd_port >= 0 && ipd_port <= 2) + return 7 - ipd_port; + else + return -1; } /* Some unknown board. Somebody forgot to update this function... 
*/ @@ -706,6 +711,14 @@ int __cvmx_helper_board_hardware_enable(int interface) } } } + } else if (cvmx_sysinfo_get()->board_type == + CVMX_BOARD_TYPE_UBNT_E100) { + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10); } return 0; } diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 389512e2abd6..7b746e7bf7a1 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -490,8 +490,15 @@ int __init octeon_prune_device_tree(void) if (alias_prop) { uart = fdt_path_offset(initial_boot_params, alias_prop); - if (uart_mask & (1 << i)) + if (uart_mask & (1 << i)) { + __be32 f; + + f = cpu_to_be32(octeon_get_io_clock_rate()); + fdt_setprop_inplace(initial_boot_params, + uart, "clock-frequency", + &f, sizeof(f)); continue; + } pr_debug("Deleting uart%d\n", i); fdt_nop_node(initial_boot_params, uart); fdt_nop_property(initial_boot_params, aliases, diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c deleted file mode 100644 index f393f65f3923..000000000000 --- a/arch/mips/cavium-octeon/serial.c +++ /dev/null @@ -1,109 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2004-2007 Cavium Networks - */ -#include <linux/console.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/serial.h> -#include <linux/serial_8250.h> -#include <linux/serial_reg.h> -#include <linux/tty.h> -#include <linux/irq.h> - -#include <asm/time.h> - -#include <asm/octeon/octeon.h> - -#define DEBUG_UART 1 - -unsigned int octeon_serial_in(struct uart_port *up, int offset) -{ - int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3))); - if (offset == UART_IIR && (rv & 0xf) == 7) { - /* Busy interrupt, read the USR (39) and try again. */ - cvmx_read_csr((uint64_t)(up->membase + (39 << 3))); - rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3))); - } - return rv; -} - -void octeon_serial_out(struct uart_port *up, int offset, int value) -{ - /* - * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits - * working. - */ - if (offset == UART_LCR) - value &= 0x9f; - cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value); -} - -static int octeon_serial_probe(struct platform_device *pdev) -{ - int irq, res; - struct resource *res_mem; - struct uart_8250_port up; - - /* All adaptors have an irq. 
*/ - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - memset(&up, 0, sizeof(up)); - - up.port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; - up.port.type = PORT_OCTEON; - up.port.iotype = UPIO_MEM; - up.port.regshift = 3; - up.port.dev = &pdev->dev; - - if (octeon_is_simulation()) - /* Make simulator output fast*/ - up.port.uartclk = 115200 * 16; - else - up.port.uartclk = octeon_get_io_clock_rate(); - - up.port.serial_in = octeon_serial_in; - up.port.serial_out = octeon_serial_out; - up.port.irq = irq; - - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem == NULL) { - dev_err(&pdev->dev, "found no memory resource\n"); - return -ENXIO; - } - up.port.mapbase = res_mem->start; - up.port.membase = ioremap(res_mem->start, resource_size(res_mem)); - - res = serial8250_register_8250_port(&up); - - return res >= 0 ? 0 : res; -} - -static struct of_device_id octeon_serial_match[] = { - { - .compatible = "cavium,octeon-3860-uart", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, octeon_serial_match); - -static struct platform_driver octeon_serial_driver = { - .probe = octeon_serial_probe, - .driver = { - .owner = THIS_MODULE, - .name = "octeon_serial", - .of_match_table = octeon_serial_match, - }, -}; - -static int __init octeon_serial_init(void) -{ - return platform_driver_register(&octeon_serial_driver); -} -late_initcall(octeon_serial_init); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 01b1b3f94feb..48b08eb9d9e4 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -7,6 +7,7 @@ * Copyright (C) 2008, 2009 Wind River Systems * written by Ralf Baechle <ralf@linux-mips.org> */ +#include <linux/compiler.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/console.h> @@ -40,12 +41,6 @@ #include <asm/octeon/pci-octeon.h> #include <asm/octeon/cvmx-mio-defs.h> -#ifdef CONFIG_CAVIUM_DECODE_RSL -extern void cvmx_interrupt_rsl_decode(void); -extern int __cvmx_interrupt_ecc_report_single_bit_errors; -extern void cvmx_interrupt_rsl_enable(void); -#endif - extern struct plat_smp_ops octeon_smp_ops; #ifdef CONFIG_PCI @@ -463,18 +458,6 @@ static void octeon_halt(void) } /** - * Handle all the error condition interrupts that might occur. - * - */ -#ifdef CONFIG_CAVIUM_DECODE_RSL -static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) -{ - cvmx_interrupt_rsl_decode(); - return IRQ_HANDLED; -} -#endif - -/** * Return a string representing the system type * * Returns @@ -712,7 +695,7 @@ void __init prom_init(void) if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) { pr_info("Skipping L2 locking due to reduced L2 cache size\n"); } else { - uint32_t ebase = read_c0_ebase() & 0x3ffff000; + uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000; #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB /* TLB refill */ cvmx_l2c_lock_mem_region(ebase, 0x100); @@ -996,7 +979,7 @@ void __init plat_mem_setup(void) cvmx_bootmem_unlock(); /* Add the memory region for the kernel. */ kernel_start = (unsigned long) _text; - kernel_size = ALIGN(_end - _text, 0x100000); + kernel_size = _end - _text; /* Adjust for physical offset. */ kernel_start &= ~0xffffffff80000000ULL; @@ -1064,15 +1047,6 @@ void prom_free_prom_memory(void) panic("Core-14449 WAR not in place (%04x).\n" "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); } -#ifdef CONFIG_CAVIUM_DECODE_RSL - cvmx_interrupt_rsl_enable(); - - /* Add an interrupt handler for general failures. 
*/ - if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED, - "RML/RSL", octeon_rlm_interrupt)) { - panic("Unable to request_irq(OCTEON_IRQ_RML)"); - } -#endif } int octeon_prune_device_tree(void); diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 014ba4bbba7d..dace58268ce1 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -1,13 +1,11 @@ -CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y +CONFIG_CAVIUM_OCTEON_SOC=y CONFIG_CAVIUM_CN63XXP1=y CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 -CONFIG_SPARSEMEM_MANUAL=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_SMP=y CONFIG_NR_CPUS=32 CONFIG_HZ_100=y CONFIG_PREEMPT=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -50,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y # CONFIG_MTD_OF_PARTS is not set -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -114,6 +111,7 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +CONFIG_SERIAL_8250_DW=y # CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_OCTEON=y diff --git a/arch/mips/configs/wrppmc_defconfig b/arch/mips/configs/wrppmc_defconfig deleted file mode 100644 index 44a451be359e..000000000000 --- a/arch/mips/configs/wrppmc_defconfig +++ /dev/null @@ -1,97 +0,0 @@ -CONFIG_WR_PPMC=y -CONFIG_HZ_1000=y -CONFIG_EXPERIMENTAL=y -# CONFIG_SWAP is not set -CONFIG_SYSVIPC=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_EXPERT=y -CONFIG_KALLSYMS_EXTRA_PASS=y -# CONFIG_EPOLL is not set -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y -CONFIG_HOTPLUG_PCI=y -CONFIG_BINFMT_MISC=y -CONFIG_PM=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_IP_MROUTE=y -CONFIG_ARPD=y -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_TCP_MD5SIG=y -# CONFIG_IPV6 is not set -CONFIG_NETWORK_SECMARK=y -CONFIG_FW_LOADER=m -CONFIG_BLK_DEV_RAM=y -CONFIG_SGI_IOC4=m -CONFIG_NETDEVICES=y -CONFIG_PHYLIB=y -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y -CONFIG_E100=y -CONFIG_QLA3XXX=m -CONFIG_CHELSIO_T3=m -CONFIG_NETXEN_NIC=m -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=1 -CONFIG_SERIAL_8250_RUNTIME_UARTS=1 -# CONFIG_HW_RANDOM is not set -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_ROOT_NFS=y -CONFIG_DLM=m -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=ttyS0,115200n8" -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_CBC=m -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m 
-CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_LIBCRC32C=y diff --git a/arch/mips/dec/Makefile b/arch/mips/dec/Makefile index 9eb2f9c036aa..3d5d2c56de8d 100644 --- a/arch/mips/dec/Makefile +++ b/arch/mips/dec/Makefile @@ -5,6 +5,5 @@ obj-y := ecc-berr.o int-handler.o ioasic-irq.o kn01-berr.o \ kn02-irq.o kn02xa-berr.o reset.o setup.o time.o -obj-$(CONFIG_PROM_CONSOLE) += promcon.o obj-$(CONFIG_TC) += tc.o obj-$(CONFIG_CPU_HAS_WB) += wbflush.o diff --git a/arch/mips/dec/promcon.c b/arch/mips/dec/promcon.c deleted file mode 100644 index c239c25b79ff..000000000000 --- a/arch/mips/dec/promcon.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Wrap-around code for a console using the - * DECstation PROM io-routines. - * - * Copyright (c) 1998 Harald Koerfgen - */ - -#include <linux/tty.h> -#include <linux/ptrace.h> -#include <linux/init.h> -#include <linux/console.h> -#include <linux/fs.h> - -#include <asm/dec/prom.h> - -static void prom_console_write(struct console *co, const char *s, - unsigned count) -{ - unsigned i; - - /* - * Now, do each character - */ - for (i = 0; i < count; i++) { - if (*s == 10) - prom_printf("%c", 13); - prom_printf("%c", *s++); - } -} - -static int __init prom_console_setup(struct console *co, char *options) -{ - return 0; -} - -static struct console sercons = { - .name = "ttyS", - .write = prom_console_write, - .setup = prom_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, -}; - -/* - * Register console. - */ - -static int __init prom_console_init(void) -{ - register_console(&sercons); - - return 0; -} -console_initcall(prom_console_init); diff --git a/arch/mips/fw/cfe/cfe_api.c b/arch/mips/fw/cfe/cfe_api.c index d06dc5a6b8d3..cf84f01931c5 100644 --- a/arch/mips/fw/cfe/cfe_api.c +++ b/arch/mips/fw/cfe/cfe_api.c @@ -406,12 +406,12 @@ int cfe_setenv(char *name, char *val) return xiocb.xiocb_status; } -int cfe_write(int handle, unsigned char *buffer, int length) +int cfe_write(int handle, const char *buffer, int length) { return cfe_writeblk(handle, 0, buffer, length); } -int cfe_writeblk(int handle, s64 offset, unsigned char *buffer, int length) +int cfe_writeblk(int handle, s64 offset, const char *buffer, int length) { struct cfe_xiocb xiocb; diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index 3532e2c5f098..c1516cc0285f 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h @@ -11,6 +11,35 @@ #include <linux/notifier.h> +#if defined(CONFIG_CPU_CAVIUM_OCTEON) + +extern void octeon_cop2_save(struct octeon_cop2_state *); +extern void octeon_cop2_restore(struct octeon_cop2_state *); + +#define cop2_save(r) octeon_cop2_save(r) +#define cop2_restore(r) octeon_cop2_restore(r) + +#define cop2_present 1 +#define cop2_lazy_restore 1 + +#elif defined(CONFIG_CPU_XLP) + +extern void nlm_cop2_save(struct nlm_cop2_state *); +extern void nlm_cop2_restore(struct nlm_cop2_state *); +#define cop2_save(r) nlm_cop2_save(r) +#define cop2_restore(r) nlm_cop2_restore(r) + +#define cop2_present 1 +#define cop2_lazy_restore 0 + +#else + +#define cop2_present 0 +#define cop2_lazy_restore 0 +#define cop2_save(r) +#define cop2_restore(r) +#endif + enum cu2_ops { CU2_EXCEPTION, CU2_LWC2_OP, diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index e5ec8fcd8afa..1dc086087a72 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -24,6 +24,16 @@ #ifndef cpu_has_tlb #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) #endif 
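The asm/cop2.h hunk above wraps the Octeon and XLP coprocessor-2 handlers behind common cop2_save()/cop2_restore() calls plus cop2_present and cop2_lazy_restore flags, so generic code no longer needs per-platform #ifdefs. A minimal sketch of the intended call pattern, assuming a thread_struct field named cp2; the field name and the surrounding context-switch hook are assumptions, not part of this hunk:

	#include <linux/sched.h>
	#include <asm/cop2.h>

	/* Sketch: restore COP2 state for @next up front unless the platform
	 * restores it lazily on first use (cop2_lazy_restore); the whole call
	 * compiles away when cop2_present is 0. */
	static void restore_cop2_state(struct task_struct *next)
	{
		if (cop2_present && !cop2_lazy_restore)
			cop2_restore(&next->thread.cp2);
	}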
+ +/* + * For the moment we don't consider R6000 and R8000 so we can assume that + * anything that doesn't support R4000-style exceptions and interrupts is + * R3000-like. Users should still treat these two macro definitions as + * opaque. + */ +#ifndef cpu_has_3kex +#define cpu_has_3kex (!cpu_has_4kex) +#endif #ifndef cpu_has_4kex #define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX) #endif @@ -87,19 +97,23 @@ #define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16) #endif #ifndef cpu_has_mdmx -#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX) +#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX) #endif #ifndef cpu_has_mips3d -#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D) +#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D) #endif #ifndef cpu_has_smartmips -#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) +#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) #endif #ifndef cpu_has_rixi #define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI) #endif #ifndef cpu_has_mmips -#define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) +# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS +# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) +# else +# define cpu_has_mmips 0 +# endif #endif #ifndef cpu_has_vtag_icache #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) @@ -111,7 +125,7 @@ #define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC) #endif #ifndef cpu_has_pindexed_dcache -#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) +#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) #endif #ifndef cpu_has_local_ebase #define cpu_has_local_ebase 1 @@ -136,7 +150,6 @@ #endif #endif -# define cpu_has_mips_1 (cpu_data[0].isa_level & MIPS_CPU_ISA_I) #ifndef cpu_has_mips_2 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) #endif @@ -149,18 +162,18 @@ #ifndef cpu_has_mips_5 # define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V) #endif -# ifndef cpu_has_mips32r1 +#ifndef cpu_has_mips32r1 # define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1) -# endif -# ifndef cpu_has_mips32r2 +#endif +#ifndef cpu_has_mips32r2 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) -# endif -# ifndef cpu_has_mips64r1 +#endif +#ifndef cpu_has_mips64r1 # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) -# endif -# ifndef cpu_has_mips64r2 +#endif +#ifndef cpu_has_mips64r2 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) -# endif +#endif /* * Shortcuts ... @@ -182,9 +195,9 @@ * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. 
*/ -# ifndef cpu_has_clo_clz -# define cpu_has_clo_clz cpu_has_mips_r -# endif +#ifndef cpu_has_clo_clz +#define cpu_has_clo_clz cpu_has_mips_r +#endif #ifndef cpu_has_dsp #define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP) @@ -210,7 +223,7 @@ # define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) # endif # ifndef cpu_has_64bit_zero_reg -# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) +# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) # endif # ifndef cpu_has_64bit_gp_regs # define cpu_has_64bit_gp_regs 0 diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index dd86ab205483..632bbe5a79ea 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -282,18 +282,17 @@ enum cpu_type_enum { * ISA Level encodings * */ -#define MIPS_CPU_ISA_I 0x00000001 -#define MIPS_CPU_ISA_II 0x00000002 -#define MIPS_CPU_ISA_III 0x00000004 -#define MIPS_CPU_ISA_IV 0x00000008 -#define MIPS_CPU_ISA_V 0x00000010 -#define MIPS_CPU_ISA_M32R1 0x00000020 -#define MIPS_CPU_ISA_M32R2 0x00000040 -#define MIPS_CPU_ISA_M64R1 0x00000080 -#define MIPS_CPU_ISA_M64R2 0x00000100 - -#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \ - MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2) +#define MIPS_CPU_ISA_II 0x00000001 +#define MIPS_CPU_ISA_III 0x00000002 +#define MIPS_CPU_ISA_IV 0x00000004 +#define MIPS_CPU_ISA_V 0x00000008 +#define MIPS_CPU_ISA_M32R1 0x00000010 +#define MIPS_CPU_ISA_M32R2 0x00000020 +#define MIPS_CPU_ISA_M64R1 0x00000040 +#define MIPS_CPU_ISA_M64R2 0x00000080 + +#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ + MIPS_CPU_ISA_M32R2) #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) diff --git a/arch/mips/include/asm/fw/cfe/cfe_api.h b/arch/mips/include/asm/fw/cfe/cfe_api.h index 17347551a1b2..a0ea69e91e2e 100644 --- a/arch/mips/include/asm/fw/cfe/cfe_api.h +++ b/arch/mips/include/asm/fw/cfe/cfe_api.h @@ -115,8 +115,8 @@ int cfe_read(int handle, unsigned char *buffer, int length); int cfe_readblk(int handle, int64_t offset, unsigned char *buffer, int length); int cfe_setenv(char *name, char *val); -int cfe_write(int handle, unsigned char *buffer, int length); -int cfe_writeblk(int handle, int64_t offset, unsigned char *buffer, +int cfe_write(int handle, const char *buffer, int length); +int cfe_writeblk(int handle, int64_t offset, const char *buffer, int length); #endif /* CFE_API_H */ diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 7153b32de18e..b2e3e93dd7d8 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h @@ -347,7 +347,7 @@ struct gic_shared_intr_map { #define GIC_CPU_INT2 2 /* . */ #define GIC_CPU_INT3 3 /* . */ #define GIC_CPU_INT4 4 /* . */ -#define GIC_CPU_INT5 5 /* Core Interrupt 5 */ +#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ /* Local GIC interrupts. 
*/ #define GIC_INT_TMR (GIC_CPU_INT5) diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index b7e59853fd33..3321dd5a8872 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -170,6 +170,11 @@ static inline void * isa_bus_to_virt(unsigned long address) extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); extern void __iounmap(const volatile void __iomem *addr); +#ifndef CONFIG_PCI +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} +#endif + static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, unsigned long flags) { @@ -449,6 +454,11 @@ __BUILDIO(q, u64) #define readl_relaxed readl #define readq_relaxed readq +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq + #define readb_be(addr) \ __raw_readb((__force unsigned *)(addr)) #define readw_be(addr) \ diff --git a/arch/mips/include/asm/kspd.h b/arch/mips/include/asm/kspd.h deleted file mode 100644 index ec6832950ace..000000000000 --- a/arch/mips/include/asm/kspd.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - */ - -#ifndef _ASM_KSPD_H -#define _ASM_KSPD_H - -struct kspd_notifications { - void (*kspd_sp_exit)(int sp_id); - - struct list_head list; -}; - -static inline void kspd_notify(struct kspd_notifications *notify) -{ -} - -#endif diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h index ac28f273449c..660ab64c0fc9 100644 --- a/arch/mips/include/asm/mach-ar7/spaces.h +++ b/arch/mips/include/asm/mach-ar7/spaces.h @@ -14,8 +14,11 @@ * This handles the memory map. * We handle pages at KSEG0 for kernels with 32 bit address space. 
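 * (_AC() expands to the bare constant in assembly and to the UL-suffixed
 * form in C, so these definitions remain usable from .S files; the new
 * UNCAC_BASE is simply the generic 0xa0000000 uncached window shifted up
 * by PHYS_OFFSET, as the inline comment below notes.)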
*/ -#define PAGE_OFFSET 0x94000000UL -#define PHYS_OFFSET 0x14000000UL +#define PAGE_OFFSET _AC(0x94000000, UL) +#define PHYS_OFFSET _AC(0x14000000, UL) + +#define UNCAC_BASE _AC(0xb4000000, UL) /* 0xa0000000 + PHYS_OFFSET */ +#define IO_BASE UNCAC_BASE #include <asm/mach-generic/spaces.h> diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index 336228990808..19f9134bfe2f 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -9,6 +9,7 @@ * compile time if only one CPU support is enabled (idea stolen from * arm mach-types) */ +#define BCM3368_CPU_ID 0x3368 #define BCM6328_CPU_ID 0x6328 #define BCM6338_CPU_ID 0x6338 #define BCM6345_CPU_ID 0x6345 @@ -22,6 +23,19 @@ u16 __bcm63xx_get_cpu_id(void); u8 bcm63xx_get_cpu_rev(void); unsigned int bcm63xx_get_cpu_freq(void); +#ifdef CONFIG_BCM63XX_CPU_3368 +# ifdef bcm63xx_get_cpu_id +# undef bcm63xx_get_cpu_id +# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() +# define BCMCPU_RUNTIME_DETECT +# else +# define bcm63xx_get_cpu_id() BCM3368_CPU_ID +# endif +# define BCMCPU_IS_3368() (bcm63xx_get_cpu_id() == BCM3368_CPU_ID) +#else +# define BCMCPU_IS_3368() (0) +#endif + #ifdef CONFIG_BCM63XX_CPU_6328 # ifdef bcm63xx_get_cpu_id # undef bcm63xx_get_cpu_id @@ -173,7 +187,10 @@ enum bcm63xx_regs_set { #define BCM_6358_RSET_SPI_SIZE 1804 #define BCM_6368_RSET_SPI_SIZE 1804 #define RSET_ENET_SIZE 2048 -#define RSET_ENETDMA_SIZE 2048 +#define RSET_ENETDMA_SIZE 256 +#define RSET_6345_ENETDMA_SIZE 64 +#define RSET_ENETDMAC_SIZE(chans) (16 * (chans)) +#define RSET_ENETDMAS_SIZE(chans) (16 * (chans)) #define RSET_ENETSW_SIZE 65536 #define RSET_UART_SIZE 24 #define RSET_UDC_SIZE 256 @@ -191,6 +208,53 @@ enum bcm63xx_regs_set { #define RSET_RNG_SIZE 20 /* + * 3368 register sets base address + */ +#define BCM_3368_DSL_LMEM_BASE (0xdeadbeef) +#define BCM_3368_PERF_BASE (0xfff8c000) +#define BCM_3368_TIMER_BASE (0xfff8c040) +#define BCM_3368_WDT_BASE (0xfff8c080) +#define BCM_3368_UART0_BASE (0xfff8c100) +#define BCM_3368_UART1_BASE (0xfff8c120) +#define BCM_3368_GPIO_BASE (0xfff8c080) +#define BCM_3368_SPI_BASE (0xfff8c800) +#define BCM_3368_HSSPI_BASE (0xdeadbeef) +#define BCM_3368_UDC0_BASE (0xdeadbeef) +#define BCM_3368_USBDMA_BASE (0xdeadbeef) +#define BCM_3368_OHCI0_BASE (0xdeadbeef) +#define BCM_3368_OHCI_PRIV_BASE (0xdeadbeef) +#define BCM_3368_USBH_PRIV_BASE (0xdeadbeef) +#define BCM_3368_USBD_BASE (0xdeadbeef) +#define BCM_3368_MPI_BASE (0xfff80000) +#define BCM_3368_PCMCIA_BASE (0xfff80054) +#define BCM_3368_PCIE_BASE (0xdeadbeef) +#define BCM_3368_SDRAM_REGS_BASE (0xdeadbeef) +#define BCM_3368_DSL_BASE (0xdeadbeef) +#define BCM_3368_UBUS_BASE (0xdeadbeef) +#define BCM_3368_ENET0_BASE (0xfff98000) +#define BCM_3368_ENET1_BASE (0xfff98800) +#define BCM_3368_ENETDMA_BASE (0xfff99800) +#define BCM_3368_ENETDMAC_BASE (0xfff99900) +#define BCM_3368_ENETDMAS_BASE (0xfff99a00) +#define BCM_3368_ENETSW_BASE (0xdeadbeef) +#define BCM_3368_EHCI0_BASE (0xdeadbeef) +#define BCM_3368_SDRAM_BASE (0xdeadbeef) +#define BCM_3368_MEMC_BASE (0xfff84000) +#define BCM_3368_DDR_BASE (0xdeadbeef) +#define BCM_3368_M2M_BASE (0xdeadbeef) +#define BCM_3368_ATM_BASE (0xdeadbeef) +#define BCM_3368_XTM_BASE (0xdeadbeef) +#define BCM_3368_XTMDMA_BASE (0xdeadbeef) +#define BCM_3368_XTMDMAC_BASE (0xdeadbeef) +#define BCM_3368_XTMDMAS_BASE (0xdeadbeef) +#define BCM_3368_PCM_BASE (0xfff9c200) +#define BCM_3368_PCMDMA_BASE (0xdeadbeef) +#define 
BCM_3368_PCMDMAC_BASE (0xdeadbeef) +#define BCM_3368_PCMDMAS_BASE (0xdeadbeef) +#define BCM_3368_RNG_BASE (0xdeadbeef) +#define BCM_3368_MISC_BASE (0xdeadbeef) + +/* * 6328 register sets base address */ #define BCM_6328_DSL_LMEM_BASE (0xdeadbeef) @@ -235,6 +299,8 @@ enum bcm63xx_regs_set { #define BCM_6328_PCMDMAS_BASE (0xdeadbeef) #define BCM_6328_RNG_BASE (0xdeadbeef) #define BCM_6328_MISC_BASE (0xb0001800) +#define BCM_6328_OTP_BASE (0xb0000600) + /* * 6338 register sets base address */ @@ -298,7 +364,7 @@ enum bcm63xx_regs_set { #define BCM_6345_USBDMA_BASE (0xfffe2800) #define BCM_6345_ENET0_BASE (0xfffe1800) #define BCM_6345_ENETDMA_BASE (0xfffe2800) -#define BCM_6345_ENETDMAC_BASE (0xfffe2900) +#define BCM_6345_ENETDMAC_BASE (0xfffe2840) #define BCM_6345_ENETDMAS_BASE (0xfffe2a00) #define BCM_6345_ENETSW_BASE (0xdeadbeef) #define BCM_6345_PCMCIA_BASE (0xfffe2028) @@ -620,6 +686,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) #ifdef BCMCPU_RUNTIME_DETECT return bcm63xx_regs_base[set]; #else +#ifdef CONFIG_BCM63XX_CPU_3368 + __GEN_RSET(3368) +#endif #ifdef CONFIG_BCM63XX_CPU_6328 __GEN_RSET(6328) #endif @@ -687,6 +756,52 @@ enum bcm63xx_irq { }; /* + * 3368 irqs + */ +#define BCM_3368_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) +#define BCM_3368_SPI_IRQ (IRQ_INTERNAL_BASE + 1) +#define BCM_3368_UART0_IRQ (IRQ_INTERNAL_BASE + 2) +#define BCM_3368_UART1_IRQ (IRQ_INTERNAL_BASE + 3) +#define BCM_3368_DSL_IRQ 0 +#define BCM_3368_UDC0_IRQ 0 +#define BCM_3368_OHCI0_IRQ 0 +#define BCM_3368_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) +#define BCM_3368_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) +#define BCM_3368_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 9) +#define BCM_3368_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 15) +#define BCM_3368_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 16) +#define BCM_3368_HSSPI_IRQ 0 +#define BCM_3368_EHCI0_IRQ 0 +#define BCM_3368_USBD_IRQ 0 +#define BCM_3368_USBD_RXDMA0_IRQ 0 +#define BCM_3368_USBD_TXDMA0_IRQ 0 +#define BCM_3368_USBD_RXDMA1_IRQ 0 +#define BCM_3368_USBD_TXDMA1_IRQ 0 +#define BCM_3368_USBD_RXDMA2_IRQ 0 +#define BCM_3368_USBD_TXDMA2_IRQ 0 +#define BCM_3368_ENET1_RXDMA_IRQ (IRQ_INTERNAL_BASE + 17) +#define BCM_3368_ENET1_TXDMA_IRQ (IRQ_INTERNAL_BASE + 18) +#define BCM_3368_PCI_IRQ (IRQ_INTERNAL_BASE + 31) +#define BCM_3368_PCMCIA_IRQ 0 +#define BCM_3368_ATM_IRQ 0 +#define BCM_3368_ENETSW_RXDMA0_IRQ 0 +#define BCM_3368_ENETSW_RXDMA1_IRQ 0 +#define BCM_3368_ENETSW_RXDMA2_IRQ 0 +#define BCM_3368_ENETSW_RXDMA3_IRQ 0 +#define BCM_3368_ENETSW_TXDMA0_IRQ 0 +#define BCM_3368_ENETSW_TXDMA1_IRQ 0 +#define BCM_3368_ENETSW_TXDMA2_IRQ 0 +#define BCM_3368_ENETSW_TXDMA3_IRQ 0 +#define BCM_3368_XTM_IRQ 0 +#define BCM_3368_XTM_DMA0_IRQ 0 + +#define BCM_3368_EXT_IRQ0 (IRQ_INTERNAL_BASE + 25) +#define BCM_3368_EXT_IRQ1 (IRQ_INTERNAL_BASE + 26) +#define BCM_3368_EXT_IRQ2 (IRQ_INTERNAL_BASE + 27) +#define BCM_3368_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28) + + +/* * 6328 irqs */ #define BCM_6328_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h index d53f611184b9..753953e86242 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h @@ -4,6 +4,8 @@ #include <linux/if_ether.h> #include <linux/init.h> +#include <bcm63xx_regs.h> + /* * on board ethernet platform data */ @@ -37,9 +39,129 @@ struct bcm63xx_enet_platform_data { int phy_id, int reg), void (*mii_write)(struct net_device *dev, int phy_id, int reg, int val)); 
+ + /* DMA channel enable mask */ + u32 dma_chan_en_mask; + + /* DMA channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; + + /* DMA channel register width */ + unsigned int dma_chan_width; + + /* DMA descriptor shift */ + unsigned int dma_desc_shift; +}; + +/* + * on board ethernet switch platform data + */ +#define ENETSW_MAX_PORT 8 +#define ENETSW_PORTS_6328 5 /* 4 FE PHY + 1 RGMII */ +#define ENETSW_PORTS_6368 6 /* 4 FE PHY + 2 RGMII */ + +#define ENETSW_RGMII_PORT0 4 + +struct bcm63xx_enetsw_port { + int used; + int phy_id; + + int bypass_link; + int force_speed; + int force_duplex_full; + + const char *name; +}; + +struct bcm63xx_enetsw_platform_data { + char mac_addr[ETH_ALEN]; + int num_ports; + struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; + + /* DMA channel enable mask */ + u32 dma_chan_en_mask; + + /* DMA channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA channel register width */ + unsigned int dma_chan_width; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; }; int __init bcm63xx_enet_register(int unit, const struct bcm63xx_enet_platform_data *pd); +int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd); + +enum bcm63xx_regs_enetdmac { + ENETDMAC_CHANCFG, + ENETDMAC_IR, + ENETDMAC_IRMASK, + ENETDMAC_MAXBURST, + ENETDMAC_BUFALLOC, + ENETDMAC_RSTART, + ENETDMAC_FC, + ENETDMAC_LEN, +}; + +static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg) +{ +#ifdef BCMCPU_RUNTIME_DETECT + extern const unsigned long *bcm63xx_regs_enetdmac; + + return bcm63xx_regs_enetdmac[reg]; +#else +#ifdef CONFIG_BCM63XX_CPU_6345 + switch (reg) { + case ENETDMAC_CHANCFG: + return ENETDMA_6345_CHANCFG_REG; + case ENETDMAC_IR: + return ENETDMA_6345_IR_REG; + case ENETDMAC_IRMASK: + return ENETDMA_6345_IRMASK_REG; + case ENETDMAC_MAXBURST: + return ENETDMA_6345_MAXBURST_REG; + case ENETDMAC_BUFALLOC: + return ENETDMA_6345_BUFALLOC_REG; + case ENETDMAC_RSTART: + return ENETDMA_6345_RSTART_REG; + case ENETDMAC_FC: + return ENETDMA_6345_FC_REG; + case ENETDMAC_LEN: + return ENETDMA_6345_LEN_REG; + } +#endif +#if defined(CONFIG_BCM63XX_CPU_6328) || \ + defined(CONFIG_BCM63XX_CPU_6338) || \ + defined(CONFIG_BCM63XX_CPU_6348) || \ + defined(CONFIG_BCM63XX_CPU_6358) || \ + defined(CONFIG_BCM63XX_CPU_6362) || \ + defined(CONFIG_BCM63XX_CPU_6368) + switch (reg) { + case ENETDMAC_CHANCFG: + return ENETDMAC_CHANCFG_REG; + case ENETDMAC_IR: + return ENETDMAC_IR_REG; + case ENETDMAC_IRMASK: + return ENETDMAC_IRMASK_REG; + case ENETDMAC_MAXBURST: + return ENETDMAC_MAXBURST_REG; + case ENETDMAC_BUFALLOC: + case ENETDMAC_RSTART: + case ENETDMAC_FC: + case ENETDMAC_LEN: + return 0; + } +#endif +#endif + return 0; +} + + #endif /* ! 
BCM63XX_DEV_ENET_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h index 35baa1a60a64..565ff36a1119 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h @@ -11,6 +11,7 @@ static inline unsigned long bcm63xx_gpio_count(void) switch (bcm63xx_get_cpu_id()) { case BCM6328_CPU_ID: return 32; + case BCM3368_CPU_ID: case BCM6358_CPU_ID: return 40; case BCM6338_CPU_ID: diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 3203fe49b34d..9875db31d883 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -15,6 +15,39 @@ /* Clock Control register */ #define PERF_CKCTL_REG 0x4 +#define CKCTL_3368_MAC_EN (1 << 3) +#define CKCTL_3368_TC_EN (1 << 5) +#define CKCTL_3368_US_TOP_EN (1 << 6) +#define CKCTL_3368_DS_TOP_EN (1 << 7) +#define CKCTL_3368_APM_EN (1 << 8) +#define CKCTL_3368_SPI_EN (1 << 9) +#define CKCTL_3368_USBS_EN (1 << 10) +#define CKCTL_3368_BMU_EN (1 << 11) +#define CKCTL_3368_PCM_EN (1 << 12) +#define CKCTL_3368_NTP_EN (1 << 13) +#define CKCTL_3368_ACP_B_EN (1 << 14) +#define CKCTL_3368_ACP_A_EN (1 << 15) +#define CKCTL_3368_EMUSB_EN (1 << 17) +#define CKCTL_3368_ENET0_EN (1 << 18) +#define CKCTL_3368_ENET1_EN (1 << 19) +#define CKCTL_3368_USBU_EN (1 << 20) +#define CKCTL_3368_EPHY_EN (1 << 21) + +#define CKCTL_3368_ALL_SAFE_EN (CKCTL_3368_MAC_EN | \ + CKCTL_3368_TC_EN | \ + CKCTL_3368_US_TOP_EN | \ + CKCTL_3368_DS_TOP_EN | \ + CKCTL_3368_APM_EN | \ + CKCTL_3368_SPI_EN | \ + CKCTL_3368_USBS_EN | \ + CKCTL_3368_BMU_EN | \ + CKCTL_3368_PCM_EN | \ + CKCTL_3368_NTP_EN | \ + CKCTL_3368_ACP_B_EN | \ + CKCTL_3368_ACP_A_EN | \ + CKCTL_3368_EMUSB_EN | \ + CKCTL_3368_USBU_EN) + #define CKCTL_6328_PHYMIPS_EN (1 << 0) #define CKCTL_6328_ADSL_QPROC_EN (1 << 1) #define CKCTL_6328_ADSL_AFE_EN (1 << 2) @@ -181,6 +214,7 @@ #define SYS_PLL_SOFT_RESET 0x1 /* Interrupt Mask register */ +#define PERF_IRQMASK_3368_REG 0xc #define PERF_IRQMASK_6328_REG 0x20 #define PERF_IRQMASK_6338_REG 0xc #define PERF_IRQMASK_6345_REG 0xc @@ -190,6 +224,7 @@ #define PERF_IRQMASK_6368_REG 0x20 /* Interrupt Status register */ +#define PERF_IRQSTAT_3368_REG 0x10 #define PERF_IRQSTAT_6328_REG 0x28 #define PERF_IRQSTAT_6338_REG 0x10 #define PERF_IRQSTAT_6345_REG 0x10 @@ -199,6 +234,7 @@ #define PERF_IRQSTAT_6368_REG 0x28 /* External Interrupt Configuration register */ +#define PERF_EXTIRQ_CFG_REG_3368 0x14 #define PERF_EXTIRQ_CFG_REG_6328 0x18 #define PERF_EXTIRQ_CFG_REG_6338 0x14 #define PERF_EXTIRQ_CFG_REG_6345 0x14 @@ -236,6 +272,13 @@ #define PERF_SOFTRESET_6362_REG 0x10 #define PERF_SOFTRESET_6368_REG 0x10 +#define SOFTRESET_3368_SPI_MASK (1 << 0) +#define SOFTRESET_3368_ENET_MASK (1 << 2) +#define SOFTRESET_3368_MPI_MASK (1 << 3) +#define SOFTRESET_3368_EPHY_MASK (1 << 6) +#define SOFTRESET_3368_USBS_MASK (1 << 11) +#define SOFTRESET_3368_PCM_MASK (1 << 13) + #define SOFTRESET_6328_SPI_MASK (1 << 0) #define SOFTRESET_6328_EPHY_MASK (1 << 1) #define SOFTRESET_6328_SAR_MASK (1 << 2) @@ -727,6 +770,8 @@ /************************************************************************* * _REG relative to RSET_ENETDMA *************************************************************************/ +#define ENETDMA_CHAN_WIDTH 0x10 +#define ENETDMA_6345_CHAN_WIDTH 0x40 /* Controller Configuration Register */ #define ENETDMA_CFG_REG (0x0) @@ -782,31 +827,56 @@ /* State Ram Word 4 */ 
#define ENETDMA_SRAM4_REG(x) (0x20c + (x) * 0x10) +/* Broadcom 6345 ENET DMA definitions */ +#define ENETDMA_6345_CHANCFG_REG (0x00) + +#define ENETDMA_6345_MAXBURST_REG (0x40) + +#define ENETDMA_6345_RSTART_REG (0x08) + +#define ENETDMA_6345_LEN_REG (0x0C) + +#define ENETDMA_6345_IR_REG (0x14) + +#define ENETDMA_6345_IRMASK_REG (0x18) + +#define ENETDMA_6345_FC_REG (0x1C) + +#define ENETDMA_6345_BUFALLOC_REG (0x20) + +/* Shift down for EOP, SOP and WRAP bits */ +#define ENETDMA_6345_DESC_SHIFT (3) /************************************************************************* * _REG relative to RSET_ENETDMAC *************************************************************************/ /* Channel Configuration register */ -#define ENETDMAC_CHANCFG_REG(x) ((x) * 0x10) +#define ENETDMAC_CHANCFG_REG (0x0) #define ENETDMAC_CHANCFG_EN_SHIFT 0 #define ENETDMAC_CHANCFG_EN_MASK (1 << ENETDMAC_CHANCFG_EN_SHIFT) #define ENETDMAC_CHANCFG_PKTHALT_SHIFT 1 #define ENETDMAC_CHANCFG_PKTHALT_MASK (1 << ENETDMAC_CHANCFG_PKTHALT_SHIFT) #define ENETDMAC_CHANCFG_BUFHALT_SHIFT 2 #define ENETDMAC_CHANCFG_BUFHALT_MASK (1 << ENETDMAC_CHANCFG_BUFHALT_SHIFT) +#define ENETDMAC_CHANCFG_CHAINING_SHIFT 2 +#define ENETDMAC_CHANCFG_CHAINING_MASK (1 << ENETDMAC_CHANCFG_CHAINING_SHIFT) +#define ENETDMAC_CHANCFG_WRAP_EN_SHIFT 3 +#define ENETDMAC_CHANCFG_WRAP_EN_MASK (1 << ENETDMAC_CHANCFG_WRAP_EN_SHIFT) +#define ENETDMAC_CHANCFG_FLOWC_EN_SHIFT 4 +#define ENETDMAC_CHANCFG_FLOWC_EN_MASK (1 << ENETDMAC_CHANCFG_FLOWC_EN_SHIFT) /* Interrupt Control/Status register */ -#define ENETDMAC_IR_REG(x) (0x4 + (x) * 0x10) +#define ENETDMAC_IR_REG (0x4) #define ENETDMAC_IR_BUFDONE_MASK (1 << 0) #define ENETDMAC_IR_PKTDONE_MASK (1 << 1) #define ENETDMAC_IR_NOTOWNER_MASK (1 << 2) /* Interrupt Mask register */ -#define ENETDMAC_IRMASK_REG(x) (0x8 + (x) * 0x10) +#define ENETDMAC_IRMASK_REG (0x8) /* Maximum Burst Length */ -#define ENETDMAC_MAXBURST_REG(x) (0xc + (x) * 0x10) +#define ENETDMAC_MAXBURST_REG (0xc) /************************************************************************* @@ -814,26 +884,76 @@ *************************************************************************/ /* Ring Start Address register */ -#define ENETDMAS_RSTART_REG(x) ((x) * 0x10) +#define ENETDMAS_RSTART_REG (0x0) /* State Ram Word 2 */ -#define ENETDMAS_SRAM2_REG(x) (0x4 + (x) * 0x10) +#define ENETDMAS_SRAM2_REG (0x4) /* State Ram Word 3 */ -#define ENETDMAS_SRAM3_REG(x) (0x8 + (x) * 0x10) +#define ENETDMAS_SRAM3_REG (0x8) /* State Ram Word 4 */ -#define ENETDMAS_SRAM4_REG(x) (0xc + (x) * 0x10) +#define ENETDMAS_SRAM4_REG (0xc) /************************************************************************* * _REG relative to RSET_ENETSW *************************************************************************/ +/* Port traffic control */ +#define ENETSW_PTCTRL_REG(x) (0x0 + (x)) +#define ENETSW_PTCTRL_RXDIS_MASK (1 << 0) +#define ENETSW_PTCTRL_TXDIS_MASK (1 << 1) + +/* Switch mode register */ +#define ENETSW_SWMODE_REG (0xb) +#define ENETSW_SWMODE_FWD_EN_MASK (1 << 1) + +/* IMP override Register */ +#define ENETSW_IMPOV_REG (0xe) +#define ENETSW_IMPOV_FORCE_MASK (1 << 7) +#define ENETSW_IMPOV_TXFLOW_MASK (1 << 5) +#define ENETSW_IMPOV_RXFLOW_MASK (1 << 4) +#define ENETSW_IMPOV_1000_MASK (1 << 3) +#define ENETSW_IMPOV_100_MASK (1 << 2) +#define ENETSW_IMPOV_FDX_MASK (1 << 1) +#define ENETSW_IMPOV_LINKUP_MASK (1 << 0) + +/* Port override Register */ +#define ENETSW_PORTOV_REG(x) (0x58 + (x)) +#define ENETSW_PORTOV_ENABLE_MASK (1 << 6) +#define ENETSW_PORTOV_TXFLOW_MASK (1 << 5) 
+#define ENETSW_PORTOV_RXFLOW_MASK (1 << 4) +#define ENETSW_PORTOV_1000_MASK (1 << 3) +#define ENETSW_PORTOV_100_MASK (1 << 2) +#define ENETSW_PORTOV_FDX_MASK (1 << 1) +#define ENETSW_PORTOV_LINKUP_MASK (1 << 0) + +/* MDIO control register */ +#define ENETSW_MDIOC_REG (0xb0) +#define ENETSW_MDIOC_EXT_MASK (1 << 16) +#define ENETSW_MDIOC_REG_SHIFT 20 +#define ENETSW_MDIOC_PHYID_SHIFT 25 +#define ENETSW_MDIOC_RD_MASK (1 << 30) +#define ENETSW_MDIOC_WR_MASK (1 << 31) + +/* MDIO data register */ +#define ENETSW_MDIOD_REG (0xb4) + +/* Global Management Configuration Register */ +#define ENETSW_GMCR_REG (0x200) +#define ENETSW_GMCR_RST_MIB_MASK (1 << 0) + /* MIB register */ #define ENETSW_MIB_REG(x) (0x2800 + (x) * 4) #define ENETSW_MIB_REG_COUNT 47 +/* Jumbo control register port mask register */ +#define ENETSW_JMBCTL_PORT_REG (0x4004) + +/* Jumbo control mib good frame register */ +#define ENETSW_JMBCTL_MAXSIZE_REG (0x4008) + /************************************************************************* * _REG relative to RSET_OHCI_PRIV @@ -1293,7 +1413,7 @@ #define SPI_6348_RX_DATA 0x80 #define SPI_6348_RX_DATA_SIZE 0x3f -/* BCM 6358/6262/6368 SPI core */ +/* BCM 3368/6358/6262/6368 SPI core */ #define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ #define SPI_6358_MSG_CTL_WIDTH 16 #define SPI_6358_MSG_DATA 0x02 @@ -1434,4 +1554,11 @@ #define PCIE_DEVICE_OFFSET 0x8000 +/************************************************************************* + * _REG relative to RSET_OTP + *************************************************************************/ + +#define OTP_USER_BITS_6328_REG(i) (0x20 + (i) * 4) +#define OTP_6328_REG3_TP1_DISABLED BIT(9) + #endif /* BCM63XX_REGS_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h index 682bcf3b492a..b86a0efba665 100644 --- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h +++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h @@ -24,6 +24,7 @@ struct board_info { /* enabled feature/device */ unsigned int has_enet0:1; unsigned int has_enet1:1; + unsigned int has_enetsw:1; unsigned int has_pci:1; unsigned int has_pccard:1; unsigned int has_ohci0:1; @@ -36,6 +37,7 @@ struct board_info { /* ethernet config */ struct bcm63xx_enet_platform_data enet0; struct bcm63xx_enet_platform_data enet1; + struct bcm63xx_enetsw_platform_data enetsw; /* USB config */ struct bcm63xx_usbd_platform_data usbd; @@ -45,6 +47,12 @@ struct board_info { /* GPIO LEDs */ struct gpio_led leds[5]; + + /* External PHY reset GPIO */ + unsigned int ephy_reset_gpio; + + /* External PHY reset GPIO flags from gpio.h */ + unsigned long ephy_reset_gpio_flags; }; #endif /* ! 
BOARD_BCM963XX_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h index 94e3011ba7df..ff15e3b14e7a 100644 --- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h +++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h @@ -11,6 +11,10 @@ static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) static inline int is_bcm63xx_internal_registers(phys_t offset) { switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + if (offset >= 0xfff80000) + return 1; + break; case BCM6338_CPU_ID: case BCM6345_CPU_ID: case BCM6348_CPU_ID: diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index be8fb4240cec..47fb247f9663 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -13,6 +13,8 @@ #ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H #define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H +#include <linux/bug.h> + struct device; extern void octeon_pci_dma_init(void); @@ -21,18 +23,21 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) { BUG(); + return 0; } static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) { BUG(); + return 0; } static inline unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr) { BUG(); + return 0; } static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, @@ -44,6 +49,7 @@ static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, static inline int plat_dma_supported(struct device *dev, u64 mask) { BUG(); + return 0; } static inline void plat_extra_sync_for_device(struct device *dev) @@ -60,6 +66,7 @@ static inline int plat_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { BUG(); + return 0; } dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 1e7dbb192657..1668ee57acb9 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -34,15 +34,10 @@ ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register -#ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED # Disable unaligned load/store support but leave HW fixup enabled + # Needed for octeon specific memcpy or v0, v0, 0x5001 xor v0, v0, 0x1001 -#else - # Disable unaligned load/store and HW fixup support - or v0, v0, 0x5001 - xor v0, v0, 0x5001 -#endif # Read the processor ID register mfc0 v1, CP0_PRID_REG # Disable instruction prefetching (Octeon Pass1 errata) diff --git a/arch/mips/include/asm/mach-cavium-octeon/spaces.h b/arch/mips/include/asm/mach-cavium-octeon/spaces.h new file mode 100644 index 000000000000..daa91accf5ab --- /dev/null +++ b/arch/mips/include/asm/mach-cavium-octeon/spaces.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 Cavium, Inc. + */ +#ifndef _ASM_MACH_CAVIUM_OCTEON_SPACES_H +#define _ASM_MACH_CAVIUM_OCTEON_SPACES_H + +#include <linux/const.h> + +#ifdef CONFIG_64BIT +/* They are all the same and some OCTEON II cores cannot handle 0xa8.. 
*/ +#define CAC_BASE _AC(0x8000000000000000, UL) +#define UNCAC_BASE _AC(0x8000000000000000, UL) +#define IO_BASE _AC(0x8000000000000000, UL) + + +#endif /* CONFIG_64BIT */ + +#include <asm/mach-generic/spaces.h> + +#endif /* _ASM_MACH_CAVIUM_OCTEON_SPACES_H */ diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index fe23034aaf72..74cb99257d5b 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -66,4 +66,16 @@ static inline int plat_device_is_coherent(struct device *dev) #endif } +#ifdef CONFIG_SWIOTLB +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} +#endif + #endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h index 7e66505fa574..13b0751b010a 100644 --- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h @@ -12,8 +12,8 @@ /* Intentionally empty macro, used in head.S. Override in * arch/mips/mach-xxx/kernel-entry-init.h when necessary. */ -.macro kernel_entry_setup -.endm + .macro kernel_entry_setup + .endm /* * Do SMP slave processor setup necessary before we can savely execute C code. diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h index a323efb720dc..b087cb83da3a 100644 --- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h @@ -24,6 +24,53 @@ .endm /* + * TLB bits + */ +#define PAGE_GLOBAL (1 << 6) +#define PAGE_VALID (1 << 7) +#define PAGE_DIRTY (1 << 8) +#define CACHE_CACHABLE_COW (5 << 9) + + /* + * inputs are the text nasid in t1, data nasid in t2. + */ + .macro MAPPED_KERNEL_SETUP_TLB +#ifdef CONFIG_MAPPED_KERNEL + /* + * This needs to read the nasid - assume 0 for now. + * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0, + * 0+DVG in tlblo_1. + */ + dli t0, 0xffffffffc0000000 + dmtc0 t0, CP0_ENTRYHI + li t0, 0x1c000 # Offset of text into node memory + dsll t1, NASID_SHFT # Shift text nasid into place + dsll t2, NASID_SHFT # Same for data nasid + or t1, t1, t0 # Physical load address of kernel text + or t2, t2, t0 # Physical load address of kernel data + dsrl t1, 12 # 4K pfn + dsrl t2, 12 # 4K pfn + dsll t1, 6 # Get pfn into place + dsll t2, 6 # Get pfn into place + li t0, ((PAGE_GLOBAL | PAGE_VALID | CACHE_CACHABLE_COW) >> 6) + or t0, t0, t1 + mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr + li t0, ((PAGE_GLOBAL | PAGE_VALID | PAGE_DIRTY | CACHE_CACHABLE_COW) >> 6) + or t0, t0, t2 + mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr + li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M + mtc0 t0, CP0_PAGEMASK + li t0, 0 # KMAP_INX + mtc0 t0, CP0_INDEX + li t0, 1 + mtc0 t0, CP0_WIRED + tlbwi +#else + mtc0 zero, CP0_WIRED +#endif + .endm + +/* * Intentionally empty macro, used in head.S. Override in * arch/mips/mach-xxx/kernel-entry-init.h when necessary. 
*/ diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h index 5edf05d9dad8..5d6a76434d00 100644 --- a/arch/mips/include/asm/mach-ip28/spaces.h +++ b/arch/mips/include/asm/mach-ip28/spaces.h @@ -11,11 +11,14 @@ #ifndef _ASM_MACH_IP28_SPACES_H #define _ASM_MACH_IP28_SPACES_H -#define CAC_BASE 0xa800000000000000 +#define CAC_BASE _AC(0xa800000000000000, UL) -#define HIGHMEM_START (~0UL) +#define HIGHMEM_START (~0UL) -#define PHYS_OFFSET _AC(0x20000000, UL) +#define PHYS_OFFSET _AC(0x20000000, UL) + +#define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */ +#define IO_BASE UNCAC_BASE #include <asm/mach-generic/spaces.h> diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h index 98b4e7c0dbae..509cd5828044 100644 --- a/arch/mips/include/asm/mach-jz4740/dma.h +++ b/arch/mips/include/asm/mach-jz4740/dma.h @@ -16,8 +16,6 @@ #ifndef __ASM_MACH_JZ4740_DMA_H__ #define __ASM_MACH_JZ4740_DMA_H__ -struct jz4740_dma_chan; - enum jz4740_dma_request_type { JZ4740_DMA_TYPE_AUTO_REQUEST = 8, JZ4740_DMA_TYPE_UART_TRANSMIT = 20, @@ -33,58 +31,4 @@ enum jz4740_dma_request_type { JZ4740_DMA_TYPE_SLCD = 30, }; -enum jz4740_dma_width { - JZ4740_DMA_WIDTH_32BIT = 0, - JZ4740_DMA_WIDTH_8BIT = 1, - JZ4740_DMA_WIDTH_16BIT = 2, -}; - -enum jz4740_dma_transfer_size { - JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0, - JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1, - JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2, - JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3, - JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4, -}; - -enum jz4740_dma_flags { - JZ4740_DMA_SRC_AUTOINC = 0x2, - JZ4740_DMA_DST_AUTOINC = 0x1, -}; - -enum jz4740_dma_mode { - JZ4740_DMA_MODE_SINGLE = 0, - JZ4740_DMA_MODE_BLOCK = 1, -}; - -struct jz4740_dma_config { - enum jz4740_dma_width src_width; - enum jz4740_dma_width dst_width; - enum jz4740_dma_transfer_size transfer_size; - enum jz4740_dma_request_type request_type; - enum jz4740_dma_flags flags; - enum jz4740_dma_mode mode; -}; - -typedef void (*jz4740_dma_complete_callback_t)(struct jz4740_dma_chan *, int, void *); - -struct jz4740_dma_chan *jz4740_dma_request(void *dev, const char *name); -void jz4740_dma_free(struct jz4740_dma_chan *dma); - -void jz4740_dma_configure(struct jz4740_dma_chan *dma, - const struct jz4740_dma_config *config); - - -void jz4740_dma_enable(struct jz4740_dma_chan *dma); -void jz4740_dma_disable(struct jz4740_dma_chan *dma); - -void jz4740_dma_set_src_addr(struct jz4740_dma_chan *dma, dma_addr_t src); -void jz4740_dma_set_dst_addr(struct jz4740_dma_chan *dma, dma_addr_t dst); -void jz4740_dma_set_transfer_count(struct jz4740_dma_chan *dma, uint32_t count); - -uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma); - -void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma, - jz4740_dma_complete_callback_t cb); - #endif /* __ASM_JZ4740_DMA_H__ */ diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h index 72cfebdb5a47..05988c2d6565 100644 --- a/arch/mips/include/asm/mach-jz4740/platform.h +++ b/arch/mips/include/asm/mach-jz4740/platform.h @@ -32,6 +32,7 @@ extern struct platform_device jz4740_codec_device; extern struct platform_device jz4740_adc_device; extern struct platform_device jz4740_wdt_device; extern struct platform_device jz4740_pwm_device; +extern struct platform_device jz4740_dma_device; void jz4740_serial_device_register(void); diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h deleted file mode 
100644 index ebdbab973e41..000000000000 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * include/asm-mips/pmc-sierra/msp71xx/gpio.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * @author Patrick Glass <patrickglass@gmail.com> - */ - -#ifndef __PMC_MSP71XX_GPIO_H -#define __PMC_MSP71XX_GPIO_H - -/* Max number of gpio's is 28 on chip plus 3 banks of I2C IO Expanders */ -#define ARCH_NR_GPIOS (28 + (3 * 8)) - -/* new generic GPIO API - see Documentation/gpio.txt */ -#include <asm-generic/gpio.h> - -#define gpio_get_value __gpio_get_value -#define gpio_set_value __gpio_set_value -#define gpio_cansleep __gpio_cansleep - -/* Setup calls for the gpio and gpio extended */ -extern void msp71xx_init_gpio(void); -extern void msp71xx_init_gpio_extended(void); -extern int msp71xx_set_output_drive(unsigned gpio, int value); - -/* Custom output drive functionss */ -static inline int gpio_set_output_drive(unsigned gpio, int value) -{ - return msp71xx_set_output_drive(gpio, value); -} - -/* IRQ's are not supported for gpio lines */ -static inline int gpio_to_irq(unsigned gpio) -{ - return -EINVAL; -} - -static inline int irq_to_gpio(unsigned irq) -{ - return -EINVAL; -} - -#endif /* __PMC_MSP71XX_GPIO_H */ diff --git a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h b/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h deleted file mode 100644 index 00fa3684ac98..000000000000 --- a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * This is a direct copy of the ev96100.h file, with a global - * search and replace. The numbers are the same. - * - * The reason I'm duplicating this is so that the 64120/96100 - * defines won't be confusing in the source code. - */ -#ifndef __ASM_MIPS_GT64120_H -#define __ASM_MIPS_GT64120_H - -/* - * This is the CPU physical memory map of PPMC Board: - * - * 0x00000000-0x03FFFFFF - 64MB SDRAM (SCS[0]#) - * 0x1C000000-0x1C000000 - LED (CS0) - * 0x1C800000-0x1C800007 - UART 16550 port (CS1) - * 0x1F000000-0x1F000000 - MailBox (CS3) - * 0x1FC00000-0x20000000 - 4MB Flash (BOOT CS) - */ - -#define WRPPMC_SDRAM_SCS0_BASE 0x00000000 -#define WRPPMC_SDRAM_SCS0_SIZE 0x04000000 - -#define WRPPMC_UART16550_BASE 0x1C800000 -#define WRPPMC_UART16550_CLOCK 3686400 /* 3.68MHZ */ - -#define WRPPMC_LED_BASE 0x1C000000 -#define WRPPMC_MBOX_BASE 0x1F000000 - -#define WRPPMC_BOOTROM_BASE 0x1FC00000 -#define WRPPMC_BOOTROM_SIZE 0x00400000 /* 4M Flash */ - -#define WRPPMC_MIPS_TIMER_IRQ 7 /* MIPS compare/count timer interrupt */ -#define WRPPMC_UART16550_IRQ 6 -#define WRPPMC_PCI_INTA_IRQ 3 - -/* - * PCI Bus I/O and Memory resources allocation - * - * NOTE: We only have PCI_0 hose interface - */ -#define GT_PCI_MEM_BASE 0x13000000UL -#define GT_PCI_MEM_SIZE 0x02000000UL -#define GT_PCI_IO_BASE 0x11000000UL -#define GT_PCI_IO_SIZE 0x02000000UL - -/* - * PCI interrupts will come in on either the INTA or INTD interrupt lines, - * which are mapped to the #2 and #5 interrupt pins of the MIPS. On our - * boards, they all either come in on IntD or they all come in on IntA, they - * aren't mixed. There can be numerous PCI interrupts, so we keep a list of the - * "requested" interrupt numbers and go through the list whenever we get an - * IntA/D. - * - * Interrupts < 8 are directly wired to the processor; PCI INTA is 8 and - * INTD is 11. 
- */ -#define GT_TIMER 4 -#define GT_INTA 2 -#define GT_INTD 5 - -#ifndef __ASSEMBLY__ - -/* - * GT64120 internal register space base address - */ -extern unsigned long gt64120_base; - -#define GT64120_BASE (gt64120_base) - -/* define WRPPMC_EARLY_DEBUG to enable early output something to UART */ -#undef WRPPMC_EARLY_DEBUG - -#ifdef WRPPMC_EARLY_DEBUG -extern void wrppmc_led_on(int mask); -extern void wrppmc_led_off(int mask); -extern void wrppmc_early_printk(const char *fmt, ...); -#else -#define wrppmc_early_printk(fmt, ...) do {} while (0) -#endif /* WRPPMC_EARLY_DEBUG */ - -#endif /* __ASSEMBLY__ */ -#endif /* __ASM_MIPS_GT64120_H */ diff --git a/arch/mips/include/asm/mach-wrppmc/war.h b/arch/mips/include/asm/mach-wrppmc/war.h deleted file mode 100644 index e86084c0bd6b..000000000000 --- a/arch/mips/include/asm/mach-wrppmc/war.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_WRPPMC_WAR_H -#define __ASM_MIPS_MACH_WRPPMC_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define R5432_CP0_INTERRUPT_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 1 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_WRPPMC_WAR_H */ diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h index bd9746fbe4af..48616816bcbc 100644 --- a/arch/mips/include/asm/mips-boards/generic.h +++ b/arch/mips/include/asm/mips-boards/generic.h @@ -24,12 +24,6 @@ #define ASCII_DISPLAY_POS_BASE 0x1f000418 /* - * Reset register. - */ -#define SOFTRES_REG 0x1f000500 -#define GORESET 0x42 - -/* * Revision register. 
*/ #define MIPS_REVISION_REG 0x1fc00010 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 87e6207b05e4..fed1c3e9b486 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -596,7 +596,7 @@ #define MIPS_CONF3_RXI (_ULCAST_(1) << 12) #define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) #define MIPS_CONF3_ISA (_ULCAST_(3) << 14) -#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16) +#define MIPS_CONF3_ISA_OE (_ULCAST_(1) << 16) #define MIPS_CONF3_VZ (_ULCAST_(1) << 23) #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 516e6e9a5594..3b29079b5424 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -28,11 +28,7 @@ #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ do { \ - void (*tlbmiss_handler_setup_pgd)(unsigned long); \ - extern u32 tlbmiss_handler_setup_pgd_array[16]; \ - \ - tlbmiss_handler_setup_pgd = \ - (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \ + extern void tlbmiss_handler_setup_pgd(unsigned long); \ tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ } while (0) diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index aef560a51a7e..bb68c3398c80 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -39,11 +39,17 @@ * Common SMP definitions */ #define RESET_VEC_PHYS 0x1fc00000 +#define RESET_VEC_SIZE 8192 /* 8KB reset code and data */ #define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10)) + +/* Offsets of parameters in the RESET_DATA_PHYS area */ #define BOOT_THREAD_MODE 0 #define BOOT_NMI_LOCK 4 #define BOOT_NMI_HANDLER 8 +/* CPU ready flags for each CPU */ +#define BOOT_CPU_READY 2048 + #ifndef __ASSEMBLY__ #include <linux/cpumask.h> #include <linux/spinlock.h> @@ -59,23 +65,32 @@ int nlm_wakeup_secondary_cpus(void); void nlm_rmiboot_preboot(void); void nlm_percpu_init(int hwcpuid); +static inline void * +nlm_get_boot_data(int offset) +{ + return (void *)(CKSEG1ADDR(RESET_DATA_PHYS) + offset); +} + static inline void nlm_set_nmi_handler(void *handler) { - char *reset_data; + void *nmih = nlm_get_boot_data(BOOT_NMI_HANDLER); - reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS); - *(int64_t *)(reset_data + BOOT_NMI_HANDLER) = (long)handler; + *(int64_t *)nmih = (long)handler; } /* * Misc. 
*/ +void nlm_init_boot_cpu(void); unsigned int nlm_get_cpu_frequency(void); void nlm_node_init(int node); extern struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; +/* SWIOTLB */ +extern struct dma_map_ops nlm_swiotlb_dma_ops; + extern unsigned int nlm_threads_per_core; extern cpumask_t nlm_cpumask; diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h index a981f4681a15..4b5108dfaa16 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h @@ -315,7 +315,7 @@ nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi) { uint64_t ipi; - ipi = (nmi << 31) | (irq << 20); + ipi = ((uint64_t)nmi << 31) | (irq << 20); ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */ nlm_write_pic_reg(base, PIC_IPI_CTL, ipi); } diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h index 7e47209327a5..f4ea0f7f3965 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h @@ -59,6 +59,7 @@ void xlp_wakeup_secondary_cpus(void); void xlp_mmu_init(void); void nlm_hal_init(void); +void *xlp_dt_init(void *fdtp); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_NLM_XLP_H */ diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h index 2a78929cef73..5604db3d1836 100644 --- a/arch/mips/include/asm/netlogic/xlr/fmn.h +++ b/arch/mips/include/asm/netlogic/xlr/fmn.h @@ -175,6 +175,10 @@ #define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v) #define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v) +#define nlm_read_c2_status0() __read_32bit_c2_register($2, 0) +#define nlm_write_c2_status0(v) __write_32bit_c2_register($2, 0, v) +#define nlm_read_c2_status1() __read_32bit_c2_register($2, 1) +#define nlm_write_c2_status1(v) __write_32bit_c2_register($2, 1, v) #define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0) #define nlm_read_c2_config() __read_32bit_c2_register($3, 0) #define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v) @@ -237,7 +241,7 @@ static inline void nlm_msgwait(unsigned int mask) /* * Disable interrupts and enable COP2 access */ -static inline uint32_t nlm_cop2_enable(void) +static inline uint32_t nlm_cop2_enable_irqsave(void) { uint32_t sr = read_c0_status(); @@ -245,7 +249,7 @@ static inline uint32_t nlm_cop2_enable(void) return sr; } -static inline void nlm_cop2_restore(uint32_t sr) +static inline void nlm_cop2_disable_irqrestore(uint32_t sr) { write_c0_status(sr); } @@ -296,7 +300,7 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code, */ for (i = 0; i < 8; i++) { nlm_msgsnd(dest); - status = nlm_read_c2_status(0); + status = nlm_read_c2_status0(); if ((status & 0x2) == 1) pr_info("Send pending fail!\n"); if ((status & 0x4) == 0) @@ -316,7 +320,7 @@ static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid, /* wait for load pending to clear */ do { - status = nlm_read_c2_status(1); + status = nlm_read_c2_status0(); } while ((status & 0x08) != 0); /* receive error bits */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index 284fa8d773ba..7b7818d1e4d5 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -227,6 +227,7 @@ enum cvmx_board_types_enum { * use any numbers in this range. 
*/ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001, + CVMX_BOARD_TYPE_UBNT_E100 = 20002, CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, /* The remaining range is reserved for future use. */ @@ -325,6 +326,7 @@ static inline const char *cvmx_board_type_to_string(enum /* Customer private range */ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) } return "Unsupported Board"; diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index f59552fae917..f6be4741f7e8 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -205,10 +205,8 @@ extern int __virt_addr_valid(const volatile void *kaddr); #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \ - PHYS_OFFSET) -#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \ - PHYS_OFFSET) +#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) +#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index b8e24fd4cbc5..fa8e0aa250ca 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -52,7 +52,6 @@ struct pci_controller { /* * Used by boards to register their PCI busses before the actual scanning. */ -extern struct pci_controller * alloc_pci_controller(void); extern void register_pci_controller(struct pci_controller *hose); /* diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 1470b7b68b0e..3605b844ad87 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -137,7 +137,7 @@ union mips_watch_reg_state { struct mips3264_watch_reg_state mips3264; }; -#ifdef CONFIG_CPU_CAVIUM_OCTEON +#if defined(CONFIG_CPU_CAVIUM_OCTEON) struct octeon_cop2_state { /* DMFC2 rt, 0x0201 */ @@ -182,13 +182,26 @@ struct octeon_cop2_state { /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ unsigned long cop2_gfm_result[2]; }; -#define INIT_OCTEON_COP2 {0,} +#define COP2_INIT \ + .cp2 = {0,}, struct octeon_cvmseg_state { unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE] [cpu_dcache_line_size() / sizeof(unsigned long)]; }; +#elif defined(CONFIG_CPU_XLP) +struct nlm_cop2_state { + u64 rx[4]; + u64 tx[4]; + u32 tx_msg_status; + u32 rx_msg_status; +}; + +#define COP2_INIT \ + .cp2 = {{0}, {0}, 0, 0}, +#else +#define COP2_INIT #endif typedef struct { @@ -231,8 +244,11 @@ struct thread_struct { unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ unsigned long error_code; #ifdef CONFIG_CPU_CAVIUM_OCTEON - struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128))); - struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128))); + struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128))); + struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128))); +#endif +#ifdef CONFIG_CPU_XLP + struct nlm_cop2_state cp2; #endif struct mips_abi *abi; }; @@ -245,13 +261,6 @@ struct thread_struct { #define FPAFF_INIT #endif /* CONFIG_MIPS_MT_FPAFF */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON -#define OCTEON_INIT \ - .cp2 = INIT_OCTEON_COP2, -#else -#define OCTEON_INIT -#endif /* CONFIG_CPU_CAVIUM_OCTEON */ - #define INIT_THREAD { \ /* \ * Saved main processor registers \ @@ -300,9 +309,9 @@ struct thread_struct { .cp0_baduaddr = 0, \ .error_code = 0, \ /* \ - * Cavium 
Octeon specifics (null if not Octeon) \ + * Platform specific cop2 registers(null if no COP2) \ */ \ - OCTEON_INIT \ + COP2_INIT \ } struct task_struct; diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index a89d1b10d027..23fc95e65673 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -70,6 +70,14 @@ #ifndef CONFIG_CPU_HAS_SMARTMIPS LONG_S v1, PT_LO(sp) #endif +#ifdef CONFIG_CPU_CAVIUM_OCTEON + /* + * The Octeon multiplier state is affected by general + * multiply instructions. It must be saved before and + * kernel code might corrupt it + */ + jal octeon_mult_save +#endif .endm .macro SAVE_STATIC @@ -218,17 +226,8 @@ ori $28, sp, _THREAD_MASK xori $28, _THREAD_MASK #ifdef CONFIG_CPU_CAVIUM_OCTEON - .set mips64 - pref 0, 0($28) /* Prefetch the current pointer */ - pref 0, PT_R31(sp) /* Prefetch the $31(ra) */ - /* The Octeon multiplier state is affected by general multiply - instructions. It must be saved before and kernel code might - corrupt it */ - jal octeon_mult_save - LONG_L v1, 0($28) /* Load the current pointer */ - /* Restore $31(ra) that was changed by the jal */ - LONG_L ra, PT_R31(sp) - pref 0, 0(v1) /* Prefetch the current thread */ + .set mips64 + pref 0, 0($28) /* Prefetch the current pointer */ #endif .set pop .endm @@ -248,6 +247,10 @@ .endm .macro RESTORE_TEMP +#ifdef CONFIG_CPU_CAVIUM_OCTEON + /* Restore the Octeon multiplier state */ + jal octeon_mult_restore +#endif #ifdef CONFIG_CPU_HAS_SMARTMIPS LONG_L $24, PT_ACX(sp) mtlhx $24 @@ -360,10 +363,6 @@ DVPE 5 # dvpe a1 jal mips_ihb #endif /* CONFIG_MIPS_MT_SMTC */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON - /* Restore the Octeon multiplier state */ - jal octeon_mult_restore -#endif mfc0 a0, CP0_STATUS ori a0, STATMASK xori a0, STATMASK diff --git a/arch/mips/include/asm/stackprotector.h b/arch/mips/include/asm/stackprotector.h new file mode 100644 index 000000000000..eb9b1035e926 --- /dev/null +++ b/arch/mips/include/asm/stackprotector.h @@ -0,0 +1,40 @@ +/* + * GCC stack protector support. + * + * (This is directly adopted from the ARM implementation) + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and gcc expects it to be defined by a global variable called + * "__stack_chk_guard" on MIPS. This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index fd16bcb6c311..eb0af15ac656 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -15,6 +15,7 @@ #include <asm/cpu-features.h> #include <asm/watch.h> #include <asm/dsp.h> +#include <asm/cop2.h> struct task_struct; @@ -66,10 +67,18 @@ do { \ #define switch_to(prev, next, last) \ do { \ - u32 __usedfpu; \ + u32 __usedfpu, __c0_stat; \ __mips_mt_fpaff_switch_to(prev); \ if (cpu_has_dsp) \ __save_dsp(prev); \ + if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \ + if (cop2_lazy_restore) \ + KSTK_STATUS(prev) &= ~ST0_CU2; \ + __c0_stat = read_c0_status(); \ + write_c0_status(__c0_stat | ST0_CU2); \ + cop2_save(&prev->thread.cp2); \ + write_c0_status(__c0_stat & ~ST0_CU2); \ + } \ __clear_software_ll_bit(); \ __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \ (last) = resume(prev, next, task_thread_info(next), __usedfpu); \ @@ -77,6 +86,14 @@ do { \ #define finish_arch_switch(prev) \ do { \ + u32 __c0_stat; \ + if (cop2_present && !cop2_lazy_restore && \ + (KSTK_STATUS(current) & ST0_CU2)) { \ + __c0_stat = read_c0_status(); \ + write_c0_status(__c0_stat | ST0_CU2); \ + cop2_restore(¤t->thread.cp2); \ + write_c0_status(__c0_stat & ~ST0_CU2); \ + } \ if (cpu_has_dsp) \ __restore_dsp(current); \ if (cpu_has_userlocal) \ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 895320e25662..61215a34acc6 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -109,6 +109,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ +#define TIF_NOHZ 19 /* in adaptive nohz mode */ #define TIF_FIXADE 20 /* Fix address errors in software */ #define TIF_LOGADE 21 /* Log address errors to syslog */ #define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */ @@ -124,6 +125,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_USEDFPU (1<<TIF_USEDFPU) +#define _TIF_NOHZ (1<<TIF_NOHZ) #define _TIF_FIXADE (1<<TIF_FIXADE) #define _TIF_LOGADE (1<<TIF_LOGADE) #define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS) @@ -131,14 +133,19 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) +#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ + _TIF_SYSCALL_AUDIT) + /* work to do in syscall_trace_leave() */ -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) +#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ + _TIF_SYSCALL_AUDIT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME) /* work to do on any return to u-space */ -#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT) +#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \ + _TIF_WORK_SYSCALL_EXIT) #endif /* __KERNEL__ */ diff --git a/arch/mips/include/asm/xtalk/xtalk.h b/arch/mips/include/asm/xtalk/xtalk.h 
index 680e7efebbaf..26d2ed1fa917 100644 --- a/arch/mips/include/asm/xtalk/xtalk.h +++ b/arch/mips/include/asm/xtalk/xtalk.h @@ -47,6 +47,15 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t; #define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT)) #define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS)) +#ifdef CONFIG_PCI +extern int bridge_probe(nasid_t nasid, int widget, int masterwid); +#else +static inline int bridge_probe(nasid_t nasid, int widget, int masterwid) +{ + return 0; +} +#endif + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_XTALK_XTALK_H */ diff --git a/arch/mips/include/uapi/asm/fcntl.h b/arch/mips/include/uapi/asm/fcntl.h index 0bda78f70e1e..6ca432f00860 100644 --- a/arch/mips/include/uapi/asm/fcntl.h +++ b/arch/mips/include/uapi/asm/fcntl.h @@ -5,9 +5,10 @@ * * Copyright (C) 1995, 96, 97, 98, 99, 2003, 05 Ralf Baechle */ -#ifndef _ASM_FCNTL_H -#define _ASM_FCNTL_H +#ifndef _UAPI_ASM_FCNTL_H +#define _UAPI_ASM_FCNTL_H +#include <asm/sgidefs.h> #define O_APPEND 0x0008 #define O_DSYNC 0x0010 /* used to be O_SYNC, see below */ @@ -55,14 +56,15 @@ * contain all the same fields as struct flock. */ -#ifdef CONFIG_32BIT +#if _MIPS_SIM != _MIPS_SIM_ABI64 + #include <linux/types.h> struct flock { short l_type; short l_whence; - off_t l_start; - off_t l_len; + __kernel_off_t l_start; + __kernel_off_t l_len; long l_sysid; __kernel_pid_t l_pid; long pad[4]; @@ -70,8 +72,8 @@ struct flock { #define HAVE_ARCH_STRUCT_FLOCK -#endif /* CONFIG_32BIT */ +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #include <asm-generic/fcntl.h> -#endif /* _ASM_FCNTL_H */ +#endif /* _UAPI_ASM_FCNTL_H */ diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 0f4aec2ad1e6..e5a676e3d3c0 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -409,10 +409,11 @@ enum mm_32f_73_minor_op { enum mm_16c_minor_op { mm_lwm16_op = 0x04, mm_swm16_op = 0x05, - mm_jr16_op = 0x18, - mm_jrc_op = 0x1a, - mm_jalr16_op = 0x1c, - mm_jalrs16_op = 0x1e, + mm_jr16_op = 0x0c, + mm_jrc_op = 0x0d, + mm_jalr16_op = 0x0e, + mm_jalrs16_op = 0x0f, + mm_jraddiusp_op = 0x18, }; /* diff --git a/arch/mips/include/uapi/asm/msgbuf.h b/arch/mips/include/uapi/asm/msgbuf.h index 0d6c7f14de31..df849e87d9ae 100644 --- a/arch/mips/include/uapi/asm/msgbuf.h +++ b/arch/mips/include/uapi/asm/msgbuf.h @@ -14,25 +14,25 @@ struct msqid64_ds { struct ipc64_perm msg_perm; -#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEB__) unsigned long __unused1; #endif __kernel_time_t msg_stime; /* last msgsnd time */ -#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEL__) unsigned long __unused1; #endif -#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEB__) unsigned long __unused2; #endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEL__) unsigned long __unused2; #endif -#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEB__) unsigned long __unused3; #endif __kernel_time_t msg_ctime; /* last change time */ -#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEL__) unsigned long __unused3; #endif unsigned long msg_cbytes; /* current number of bytes on queue */ diff --git 
a/arch/mips/include/uapi/asm/resource.h b/arch/mips/include/uapi/asm/resource.h index 87cb3085269c..b26439d4ab0b 100644 --- a/arch/mips/include/uapi/asm/resource.h +++ b/arch/mips/include/uapi/asm/resource.h @@ -26,7 +26,7 @@ * but we keep the old value on MIPS32, * for compatibility: */ -#ifdef CONFIG_32BIT +#ifndef __mips64 # define RLIM_INFINITY 0x7fffffffUL #endif diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 6a8714193fb9..b7a23064841f 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -25,10 +25,10 @@ struct siginfo; /* * Careful to keep union _sifields from shifting ... */ -#ifdef CONFIG_32BIT +#if __SIZEOF_LONG__ == 4 #define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) #endif -#ifdef CONFIG_64BIT +#if __SIZEOF_LONG__ == 8 #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #endif diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 3b211507be7f..61c01f054d1b 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -92,4 +92,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index 97c2f81b4b43..ac9a8f9cd1fb 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h @@ -13,7 +13,7 @@ #define __SWAB_64_THRU_32__ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { @@ -39,10 +39,10 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) #define __arch_swab32 __arch_swab32 /* - * Having already checked for CONFIG_CPU_MIPSR2, enable the - * optimized version for 64-bit kernel on r2 CPUs. + * Having already checked for MIPS R2, enable the optimized version for + * 64-bit kernel on r2 CPUs. */ -#ifdef CONFIG_64BIT +#ifdef __mips64 static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( @@ -54,6 +54,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) return x; } #define __arch_swab64 __arch_swab64 -#endif /* CONFIG_64BIT */ -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* __mips64 */ +#endif /* MIPS R2 or newer */ #endif /* _ASM_SWAB_H */ diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile index 63bad0e491d0..28e5535dfa9e 100644 --- a/arch/mips/jz4740/Makefile +++ b/arch/mips/jz4740/Makefile @@ -4,7 +4,7 @@ # Object file lists. 
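The uapi header changes just above (fcntl.h, msgbuf.h, resource.h, siginfo.h, swab.h) all follow one rule: exported headers cannot test kernel-only CONFIG_* symbols, so the conditionals are rewritten in terms of symbols userspace can see, either _MIPS_SIM from <asm/sgidefs.h> or compiler predefines such as __mips64, __MIPSEB__/__MIPSEL__, __SIZEOF_LONG__ and __mips_isa_rev. The small program below is a sketch for checking those predefines on a given toolchain; it is not part of the patch.

#include <stdio.h>

int main(void)
{
	/* __SIZEOF_LONG__ is always predefined; the MIPS-specific macros
	 * only appear when compiling for the corresponding target/ISA. */
	printf("sizeof(long) = %d\n", (int)__SIZEOF_LONG__);
#ifdef __mips64
	printf("__mips64 defined (64-bit ABI)\n");
#endif
#ifdef __MIPSEB__
	printf("__MIPSEB__ defined (big endian)\n");
#endif
#ifdef __MIPSEL__
	printf("__MIPSEL__ defined (little endian)\n");
#endif
#ifdef __mips_isa_rev
	printf("__mips_isa_rev = %d\n", __mips_isa_rev);
#endif
	return 0;
}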
-obj-y += prom.o irq.o time.o reset.o setup.o dma.o \ +obj-y += prom.o irq.o time.o reset.o setup.o \ gpio.o clock.o platform.o timer.o serial.o obj-$(CONFIG_DEBUG_FS) += clock-debugfs.o diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index be2b3deeef1d..8a5ec0eedeb0 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -438,6 +438,7 @@ static struct platform_device *jz_platform_devices[] __initdata = { &jz4740_rtc_device, &jz4740_adc_device, &jz4740_pwm_device, + &jz4740_dma_device, &qi_lb60_gpio_keys, &qi_lb60_pwm_beeper, &qi_lb60_charger_device, diff --git a/arch/mips/jz4740/clock.c b/arch/mips/jz4740/clock.c index 484d38a0864f..1b5f55426cad 100644 --- a/arch/mips/jz4740/clock.c +++ b/arch/mips/jz4740/clock.c @@ -687,7 +687,7 @@ static struct clk jz4740_clock_simple_clks[] = { [3] = { .name = "dma", .parent = &jz_clk_high_speed_peripheral.clk, - .gate_bit = JZ_CLOCK_GATE_UART0, + .gate_bit = JZ_CLOCK_GATE_DMAC, .ops = &jz_clk_simple_ops, }, [4] = { diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c deleted file mode 100644 index 317ec6fffb12..000000000000 --- a/arch/mips/jz4740/dma.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> - * JZ4740 SoC DMA support - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/interrupt.h> - -#include <linux/dma-mapping.h> -#include <asm/mach-jz4740/dma.h> -#include <asm/mach-jz4740/base.h> - -#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) -#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) -#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) -#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) -#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) -#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) -#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) - -#define JZ_REG_DMA_CTRL 0x300 -#define JZ_REG_DMA_IRQ 0x304 -#define JZ_REG_DMA_DOORBELL 0x308 -#define JZ_REG_DMA_DOORBELL_SET 0x30C - -#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) -#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) -#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) -#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) -#define JZ_DMA_STATUS_CTRL_HALT BIT(2) -#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) -#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) - -#define JZ_DMA_CMD_SRC_INC BIT(23) -#define JZ_DMA_CMD_DST_INC BIT(22) -#define JZ_DMA_CMD_RDIL_MASK (0xf << 16) -#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) -#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) -#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) -#define JZ_DMA_CMD_BLOCK_MODE BIT(7) -#define JZ_DMA_CMD_DESC_VALID BIT(4) -#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) -#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) -#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) -#define JZ_DMA_CMD_LINK_ENABLE BIT(0) - -#define JZ_DMA_CMD_FLAGS_OFFSET 22 -#define JZ_DMA_CMD_RDIL_OFFSET 16 -#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 -#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 -#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 -#define JZ_DMA_CMD_MODE_OFFSET 
7 - -#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) -#define JZ_DMA_CTRL_HALT BIT(3) -#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) -#define JZ_DMA_CTRL_ENABLE BIT(0) - - -static void __iomem *jz4740_dma_base; -static spinlock_t jz4740_dma_lock; - -static inline uint32_t jz4740_dma_read(size_t reg) -{ - return readl(jz4740_dma_base + reg); -} - -static inline void jz4740_dma_write(size_t reg, uint32_t val) -{ - writel(val, jz4740_dma_base + reg); -} - -static inline void jz4740_dma_write_mask(size_t reg, uint32_t val, uint32_t mask) -{ - uint32_t val2; - val2 = jz4740_dma_read(reg); - val2 &= ~mask; - val2 |= val; - jz4740_dma_write(reg, val2); -} - -struct jz4740_dma_chan { - unsigned int id; - void *dev; - const char *name; - - enum jz4740_dma_flags flags; - uint32_t transfer_shift; - - jz4740_dma_complete_callback_t complete_cb; - - unsigned used:1; -}; - -#define JZ4740_DMA_CHANNEL(_id) { .id = _id } - -struct jz4740_dma_chan jz4740_dma_channels[] = { - JZ4740_DMA_CHANNEL(0), - JZ4740_DMA_CHANNEL(1), - JZ4740_DMA_CHANNEL(2), - JZ4740_DMA_CHANNEL(3), - JZ4740_DMA_CHANNEL(4), - JZ4740_DMA_CHANNEL(5), -}; - -struct jz4740_dma_chan *jz4740_dma_request(void *dev, const char *name) -{ - unsigned int i; - struct jz4740_dma_chan *dma = NULL; - - spin_lock(&jz4740_dma_lock); - - for (i = 0; i < ARRAY_SIZE(jz4740_dma_channels); ++i) { - if (!jz4740_dma_channels[i].used) { - dma = &jz4740_dma_channels[i]; - dma->used = 1; - break; - } - } - - spin_unlock(&jz4740_dma_lock); - - if (!dma) - return NULL; - - dma->dev = dev; - dma->name = name; - - return dma; -} -EXPORT_SYMBOL_GPL(jz4740_dma_request); - -void jz4740_dma_configure(struct jz4740_dma_chan *dma, - const struct jz4740_dma_config *config) -{ - uint32_t cmd; - - switch (config->transfer_size) { - case JZ4740_DMA_TRANSFER_SIZE_2BYTE: - dma->transfer_shift = 1; - break; - case JZ4740_DMA_TRANSFER_SIZE_4BYTE: - dma->transfer_shift = 2; - break; - case JZ4740_DMA_TRANSFER_SIZE_16BYTE: - dma->transfer_shift = 4; - break; - case JZ4740_DMA_TRANSFER_SIZE_32BYTE: - dma->transfer_shift = 5; - break; - default: - dma->transfer_shift = 0; - break; - } - - cmd = config->flags << JZ_DMA_CMD_FLAGS_OFFSET; - cmd |= config->src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; - cmd |= config->dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; - cmd |= config->transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; - cmd |= config->mode << JZ_DMA_CMD_MODE_OFFSET; - cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; - - jz4740_dma_write(JZ_REG_DMA_CMD(dma->id), cmd); - jz4740_dma_write(JZ_REG_DMA_STATUS_CTRL(dma->id), 0); - jz4740_dma_write(JZ_REG_DMA_REQ_TYPE(dma->id), config->request_type); -} -EXPORT_SYMBOL_GPL(jz4740_dma_configure); - -void jz4740_dma_set_src_addr(struct jz4740_dma_chan *dma, dma_addr_t src) -{ - jz4740_dma_write(JZ_REG_DMA_SRC_ADDR(dma->id), src); -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_src_addr); - -void jz4740_dma_set_dst_addr(struct jz4740_dma_chan *dma, dma_addr_t dst) -{ - jz4740_dma_write(JZ_REG_DMA_DST_ADDR(dma->id), dst); -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_dst_addr); - -void jz4740_dma_set_transfer_count(struct jz4740_dma_chan *dma, uint32_t count) -{ - count >>= dma->transfer_shift; - jz4740_dma_write(JZ_REG_DMA_TRANSFER_COUNT(dma->id), count); -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_transfer_count); - -void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma, - jz4740_dma_complete_callback_t cb) -{ - dma->complete_cb = cb; -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_complete_cb); - -void jz4740_dma_free(struct jz4740_dma_chan *dma) -{ - dma->dev = NULL; - dma->complete_cb = 
NULL; - dma->used = 0; -} -EXPORT_SYMBOL_GPL(jz4740_dma_free); - -void jz4740_dma_enable(struct jz4740_dma_chan *dma) -{ - jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), - JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, - JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | - JZ_DMA_STATUS_CTRL_ENABLE); - - jz4740_dma_write_mask(JZ_REG_DMA_CTRL, - JZ_DMA_CTRL_ENABLE, - JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); -} -EXPORT_SYMBOL_GPL(jz4740_dma_enable); - -void jz4740_dma_disable(struct jz4740_dma_chan *dma) -{ - jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, - JZ_DMA_STATUS_CTRL_ENABLE); -} -EXPORT_SYMBOL_GPL(jz4740_dma_disable); - -uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma) -{ - uint32_t residue; - residue = jz4740_dma_read(JZ_REG_DMA_TRANSFER_COUNT(dma->id)); - return residue << dma->transfer_shift; -} -EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); - -static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) -{ - (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); - - jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, - JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); - - if (dma->complete_cb) - dma->complete_cb(dma, 0, dma->dev); -} - -static irqreturn_t jz4740_dma_irq(int irq, void *dev_id) -{ - uint32_t irq_status; - unsigned int i; - - irq_status = readl(jz4740_dma_base + JZ_REG_DMA_IRQ); - - for (i = 0; i < 6; ++i) { - if (irq_status & (1 << i)) - jz4740_dma_chan_irq(&jz4740_dma_channels[i]); - } - - return IRQ_HANDLED; -} - -static int jz4740_dma_init(void) -{ - unsigned int ret; - - jz4740_dma_base = ioremap(JZ4740_DMAC_BASE_ADDR, 0x400); - - if (!jz4740_dma_base) - return -EBUSY; - - spin_lock_init(&jz4740_dma_lock); - - ret = request_irq(JZ4740_IRQ_DMAC, jz4740_dma_irq, 0, "DMA", NULL); - - if (ret) - printk(KERN_ERR "JZ4740 DMA: Failed to request irq: %d\n", ret); - - return ret; -} -arch_initcall(jz4740_dma_init); diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c index e9348fd26a35..df65677f3d0b 100644 --- a/arch/mips/jz4740/platform.c +++ b/arch/mips/jz4740/platform.c @@ -329,3 +329,24 @@ struct platform_device jz4740_pwm_device = { .name = "jz4740-pwm", .id = -1, }; + +/* DMA */ +static struct resource jz4740_dma_resources[] = { + { + .start = JZ4740_DMAC_BASE_ADDR, + .end = JZ4740_DMAC_BASE_ADDR + 0x400 - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = JZ4740_IRQ_DMAC, + .end = JZ4740_IRQ_DMAC, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device jz4740_dma_device = { + .name = "jz4740-dma", + .id = -1, + .num_resources = ARRAY_SIZE(jz4740_dma_resources), + .resource = jz4740_dma_resources, +}; diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0845091ba480..0c2e853c3db4 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -82,6 +82,9 @@ void output_task_defines(void) OFFSET(TASK_FLAGS, task_struct, flags); OFFSET(TASK_MM, task_struct, mm); OFFSET(TASK_PID, task_struct, pid); +#if defined(CONFIG_CC_STACKPROTECTOR) + OFFSET(TASK_STACK_CANARY, task_struct, stack_canary); +#endif DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); BLANK(); } diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 46c2ad0703a0..4d78bf445a9c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -467,5 +467,4 @@ unaligned: printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); force_sig(SIGBUS, current); return -EFAULT; - } diff --git a/arch/mips/kernel/cpu-bugs64.c 
b/arch/mips/kernel/cpu-bugs64.c index de3c25ffd9f9..0c61df281ce6 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -6,6 +6,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include <linux/context_tracking.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/ptrace.h> @@ -171,8 +172,12 @@ static volatile int daddi_ov __cpuinitdata; asmlinkage void __init do_daddi_ov(struct pt_regs *regs) { + enum ctx_state prev_state; + + prev_state = exception_enter(); daddi_ov = 1; regs->cp0_epc += 4; + exception_exit(prev_state); } static inline void check_daddi(void) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index c6568bf4b1b0..c7b1b3c5a761 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -146,8 +146,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) case MIPS_CPU_ISA_IV: c->isa_level |= MIPS_CPU_ISA_IV; case MIPS_CPU_ISA_III: - c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | - MIPS_CPU_ISA_III; + c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; case MIPS_CPU_ISA_M32R2: @@ -156,8 +155,6 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) c->isa_level |= MIPS_CPU_ISA_M32R1; case MIPS_CPU_ISA_II: c->isa_level |= MIPS_CPU_ISA_II; - case MIPS_CPU_ISA_I: - c->isa_level |= MIPS_CPU_ISA_I; break; } } @@ -272,9 +269,6 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_ULRI; if (config3 & MIPS_CONF3_ISA) c->options |= MIPS_CPU_MICROMIPS; -#ifdef CONFIG_CPU_MICROMIPS - write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE); -#endif if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; @@ -332,7 +326,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_R2000: c->cputype = CPU_R2000; __cpu_name[cpu] = "R2000"; - set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) @@ -352,7 +345,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R3000; __cpu_name[cpu] = "R3000"; } - set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) @@ -455,7 +447,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) break; #endif case PRID_IMP_TX39: - set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { @@ -959,6 +950,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) set_isa(c, MIPS_CPU_ISA_M64R1); c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; } + c->kscratch_mask = 0xf; } #ifdef CONFIG_64BIT diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index 3be9e7bb30ff..f291cf99b03a 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -4,16 +4,6 @@ #include <asm/uaccess.h> #include <linux/slab.h> -static int __init parse_savemaxmem(char *p) -{ - if (p) - saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; - - return 1; -} -__setup("savemaxmem=", parse_savemaxmem); - - static void *kdump_buf_page; /** diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index c61cdaed2b1d..099912324423 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -28,45 +28,6 @@ #include <kernel-entry-init.h> /* - * inputs are the text nasid in t1, data 
nasid in t2. - */ - .macro MAPPED_KERNEL_SETUP_TLB -#ifdef CONFIG_MAPPED_KERNEL - /* - * This needs to read the nasid - assume 0 for now. - * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0, - * 0+DVG in tlblo_1. - */ - dli t0, 0xffffffffc0000000 - dmtc0 t0, CP0_ENTRYHI - li t0, 0x1c000 # Offset of text into node memory - dsll t1, NASID_SHFT # Shift text nasid into place - dsll t2, NASID_SHFT # Same for data nasid - or t1, t1, t0 # Physical load address of kernel text - or t2, t2, t0 # Physical load address of kernel data - dsrl t1, 12 # 4K pfn - dsrl t2, 12 # 4K pfn - dsll t1, 6 # Get pfn into place - dsll t2, 6 # Get pfn into place - li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6) - or t0, t0, t1 - mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr - li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6) - or t0, t0, t2 - mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr - li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M - mtc0 t0, CP0_PAGEMASK - li t0, 0 # KMAP_INX - mtc0 t0, CP0_INDEX - li t0, 1 - mtc0 t0, CP0_WIRED - tlbwi -#else - mtc0 zero, CP0_WIRED -#endif - .endm - - /* * For the moment disable interrupts, mark the kernel mode and * set ST0_KX so that the CPU does not spit fire when using * 64-bit addresses. A full initialization of the CPU's status diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index c01b307317a9..5b5ddb231f26 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -219,16 +219,15 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); - for (;;) { - /* Re-route this IRQ */ - GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); - /* Update the pcpu_masks */ - for (i = 0; i < NR_CPUS; i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); + /* Re-route this IRQ */ + GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); + + /* Update the pcpu_masks */ + for (i = 0; i < NR_CPUS; i++) + clear_bit(irq, pcpu_masks[i].pcpu_mask); + set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); - } cpumask_copy(d->affinity, cpumask); spin_unlock_irqrestore(&gic_lock, flags); diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 33d067148e61..a03e93c4a946 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -168,15 +168,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra) #endif /* arg3: Get frame pointer of current stack */ -#ifdef CONFIG_FRAME_POINTER - move a2, fp -#else /* ! 
CONFIG_FRAME_POINTER */ #ifdef CONFIG_64BIT PTR_LA a2, PT_SIZE(sp) #else PTR_LA a2, (PT_SIZE+8)(sp) #endif -#endif jal prepare_ftrace_return nop diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 0e23343eb0a9..4204d76af854 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -40,33 +40,6 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) - /* check if we need to save COP2 registers */ - PTR_L t2, TASK_THREAD_INFO(a0) - LONG_L t0, ST_OFF(t2) - bbit0 t0, 30, 1f - - /* Disable COP2 in the stored process state */ - li t1, ST0_CU2 - xor t0, t1 - LONG_S t0, ST_OFF(t2) - - /* Enable COP2 so we can save it */ - mfc0 t0, CP0_STATUS - or t0, t1 - mtc0 t0, CP0_STATUS - - /* Save COP2 */ - daddu a0, THREAD_CP2 - jal octeon_cop2_save - dsubu a0, THREAD_CP2 - - /* Disable COP2 now that we are done */ - mfc0 t0, CP0_STATUS - li t1, ST0_CU2 - xor t0, t1 - mtc0 t0, CP0_STATUS - -1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ mfc0 t0, $11,7 /* CvmMemCtl */ @@ -98,6 +71,13 @@ mtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: + +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + PTR_L t8, __stack_chk_guard + LONG_L t9, TASK_STACK_CANARY(a1) + LONG_S t9, 0(t8) +#endif + /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index acb34373679e..8c58d8a84bf3 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -66,9 +66,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "]\n"); } if (cpu_has_mips_r) { - seq_printf(m, "isa\t\t\t:"); - if (cpu_has_mips_1) - seq_printf(m, "%s", " mips1"); + seq_printf(m, "isa\t\t\t: mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c6a041d9d05d..ddc76103e78c 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -201,9 +201,12 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) return 1; } -/* - * - */ +#ifdef CONFIG_CC_STACKPROTECTOR +#include <linux/stackprotector.h> +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + struct mips_frame_info { void *func; unsigned long func_size; diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 5712bb532245..7e954042f252 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -30,7 +30,7 @@ __init void mips_set_machine_name(const char *name) if (name == NULL) return; - strncpy(mips_machine_name, name, sizeof(mips_machine_name)); + strlcpy(mips_machine_name, name, sizeof(mips_machine_name)); pr_info("MIPS: machine is %s\n", mips_get_machine_name()); } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 9c6299c733a3..8ae1ebef8b71 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -15,6 +15,7 @@ * binaries. 
*/ #include <linux/compiler.h> +#include <linux/context_tracking.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> @@ -534,6 +535,8 @@ static inline int audit_arch(void) */ asmlinkage void syscall_trace_enter(struct pt_regs *regs) { + user_exit(); + /* do the secure computing check first */ secure_computing_strict(regs->regs[2]); @@ -570,6 +573,13 @@ out: */ asmlinkage void syscall_trace_leave(struct pt_regs *regs) { + /* + * We may come here right after calling schedule_user() + * or do_notify_resume(), in which case we can be in RCU + * user mode. + */ + user_exit(); + audit_syscall_exit(regs); if (!(current->ptrace & PT_PTRACED)) @@ -592,4 +602,6 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) send_sig(current->exit_code, current, 1); current->exit_code = 0; } + + user_enter(); } diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5266c6ee2b35..38af83f84c4a 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -65,6 +65,13 @@ LEAF(resume) fpu_save_single a0, t0 # clobbers t0 1: + +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + PTR_L t8, __stack_chk_guard + LONG_L t9, TASK_STACK_CANARY(a1) + LONG_S t9, 0(t8) +#endif + /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 5e51219990aa..921238a6bd26 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -68,6 +68,12 @@ # clobbers t1 1: +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + PTR_L t8, __stack_chk_guard + LONG_L t9, TASK_STACK_CANARY(a1) + LONG_S t9, 0(t8) +#endif + /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 6fa198db8999..d763f11e35e2 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -437,7 +437,6 @@ static ssize_t file_write(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { int minor = iminor(file_inode(file)); - struct rtlx_channel *rt = &rtlx->channel[minor]; /* any space left... */ if (!rtlx_write_poll(minor)) { diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index e9127ec612ef..e774bb1088b5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp) stack_done: lw t0, TI_FLAGS($28) # syscall tracing enabled? - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 97a5909a61cf..be6627ead619 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? 
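The scall*.S hunks here and below replace the open-coded `_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT` test with a single `_TIF_WORK_SYSCALL_ENTRY` mask, so any flag that must force the slow syscall-entry path (tracing, audit and, depending on the kernel, seccomp or tracepoints) is maintained in one place in thread_info.h rather than in four assembly files. A sketch of the mask test the assembly performs; the bit positions below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions -- the real ones live in asm/thread_info.h. */
#define TIF_SYSCALL_TRACE       0
#define TIF_SYSCALL_AUDIT       1
#define TIF_SECCOMP             2

#define _TIF_SYSCALL_TRACE      (1u << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT      (1u << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP            (1u << TIF_SECCOMP)

/* One composite mask: anything set here forces the traced entry path. */
#define _TIF_WORK_SYSCALL_ENTRY \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)

int main(void)
{
        uint32_t ti_flags = _TIF_SECCOMP;       /* e.g. only seccomp is active */

        /* Equivalent of "and t0, t1; bnez t0, syscall_trace_entry". */
        if (ti_flags & _TIF_WORK_SYSCALL_ENTRY)
                puts("slow path: syscall_trace_enter()");
        else
                puts("fast path");
        return 0;
}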
and t0, t1, t0 bnez t0, syscall_trace_entry diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index edcb6594e7b5..cab150789c8d 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, n32_syscall_trace_entry diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 74f485d3c0ef..37605dc8eef7 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp) PTR 4b, bad_stack .previous - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, trace_a_syscall diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index fd3ef2c2afbc..2f285abc76d5 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -8,6 +8,7 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/cache.h> +#include <linux/context_tracking.h> #include <linux/irqflags.h> #include <linux/sched.h> #include <linux/mm.h> @@ -573,6 +574,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, { local_irq_enable(); + user_exit(); + /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); @@ -581,6 +584,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } + + user_enter(); } #ifdef CONFIG_SMP diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 8e393b8443f7..aea6c0885838 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -63,7 +63,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); static void __init bmips_smp_setup(void) { - int i; + int i, cpu = 1, boot_cpu = 0; #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) /* arbitration priority */ @@ -72,13 +72,22 @@ static void __init bmips_smp_setup(void) /* NBK and weak order flags */ set_c0_brcm_config_0(0x30000); + /* Find out if we are running on TP0 or TP1 */ + boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); + /* * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output + * + * If booting from TP1, leave the existing CMT interrupt routing + * such that TP0 responds to SW1 and TP1 responds to SW0. 
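The signal.c change above brackets do_notify_resume() with user_exit()/user_enter() so that, with CONFIG_CONTEXT_TRACKING enabled, RCU and the tickless code know the CPU has re-entered the kernel while signals are delivered and is about to return to user space afterwards. A minimal kernel-style sketch of the same bracket around a hypothetical return-to-user work function (the work in the middle is only indicated):

#include <linux/context_tracking.h>

/* Hypothetical return-to-user work function; only the bracket is the point. */
static void example_notify_resume(void)
{
        user_exit();            /* context tracking: CPU is back in the kernel */

        /* ... deliver pending signals, run tracehook_notify_resume(), ... */

        user_enter();           /* about to drop back to user mode */
}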
*/ - change_c0_brcm_cmt_intr(0xf8018000, - (0x02 << 27) | (0x03 << 15)); + if (boot_cpu == 0) + change_c0_brcm_cmt_intr(0xf8018000, + (0x02 << 27) | (0x03 << 15)); + else + change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27)); /* single core, 2 threads (2 pipelines) */ max_cpus = 2; @@ -106,9 +115,15 @@ static void __init bmips_smp_setup(void) if (!board_ebase_setup) board_ebase_setup = &bmips_ebase_setup; + __cpu_number_map[boot_cpu] = 0; + __cpu_logical_map[0] = boot_cpu; + for (i = 0; i < max_cpus; i++) { - __cpu_number_map[i] = 1; - __cpu_logical_map[i] = 1; + if (i != boot_cpu) { + __cpu_number_map[i] = cpu; + __cpu_logical_map[cpu] = i; + cpu++; + } set_cpu_possible(i, 1); set_cpu_present(i, 1); } @@ -157,7 +172,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) bmips_send_ipi_single(cpu, 0); else { #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) - set_c0_brcm_cmt_ctrl(0x01); + /* Reset slave TP1 if booting from TP0 */ + if (cpu_logical_map(cpu) == 0) + set_c0_brcm_cmt_ctrl(0x01); #elif defined(CONFIG_CPU_BMIPS5000) if (cpu & 0x01) write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a75ae40184aa..0903d70b2cfe 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -13,6 +13,7 @@ */ #include <linux/bug.h> #include <linux/compiler.h> +#include <linux/context_tracking.h> #include <linux/kexec.h> #include <linux/init.h> #include <linux/kernel.h> @@ -264,7 +265,7 @@ static void __show_regs(const struct pt_regs *regs) printk("Status: %08x ", (uint32_t) regs->cp0_status); - if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { + if (cpu_has_3kex) { if (regs->cp0_status & ST0_KUO) printk("KUo "); if (regs->cp0_status & ST0_IEO) @@ -277,7 +278,7 @@ static void __show_regs(const struct pt_regs *regs) printk("KUc "); if (regs->cp0_status & ST0_IEC) printk("IEc "); - } else { + } else if (cpu_has_4kex) { if (regs->cp0_status & ST0_KX) printk("KX "); if (regs->cp0_status & ST0_SX) @@ -423,7 +424,9 @@ asmlinkage void do_be(struct pt_regs *regs) const struct exception_table_entry *fixup = NULL; int data = regs->cp0_cause & 4; int action = MIPS_BE_FATAL; + enum ctx_state prev_state; + prev_state = exception_enter(); /* XXX For now. Fixme, this searches the wrong table ... 
*/ if (data && !user_mode(regs)) fixup = search_dbe_tables(exception_epc(regs)); @@ -436,11 +439,11 @@ asmlinkage void do_be(struct pt_regs *regs) switch (action) { case MIPS_BE_DISCARD: - return; + goto out; case MIPS_BE_FIXUP: if (fixup) { regs->cp0_epc = fixup->nextinsn; - return; + goto out; } break; default: @@ -455,10 +458,13 @@ asmlinkage void do_be(struct pt_regs *regs) field, regs->cp0_epc, field, regs->regs[31]); if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) == NOTIFY_STOP) - return; + goto out; die_if_kernel("Oops", regs); force_sig(SIGBUS, current); + +out: + exception_exit(prev_state); } /* @@ -673,8 +679,10 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) asmlinkage void do_ov(struct pt_regs *regs) { + enum ctx_state prev_state; siginfo_t info; + prev_state = exception_enter(); die_if_kernel("Integer overflow", regs); info.si_code = FPE_INTOVF; @@ -682,6 +690,7 @@ asmlinkage void do_ov(struct pt_regs *regs) info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); } int process_fpemu_return(int sig, void __user *fault_addr) @@ -713,11 +722,13 @@ int process_fpemu_return(int sig, void __user *fault_addr) */ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { + enum ctx_state prev_state; siginfo_t info = {0}; + prev_state = exception_enter(); if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) - return; + goto out; die_if_kernel("FP exception in kernel code", regs); if (fcr31 & FPU_CSR_UNI_X) { @@ -753,7 +764,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) /* If something went wrong, signal */ process_fpemu_return(sig, fault_addr); - return; + goto out; } else if (fcr31 & FPU_CSR_INV_X) info.si_code = FPE_FLTINV; else if (fcr31 & FPU_CSR_DIV_X) @@ -770,6 +781,9 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); + +out: + exception_exit(prev_state); } static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, @@ -835,9 +849,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, asmlinkage void do_bp(struct pt_regs *regs) { unsigned int opcode, bcode; + enum ctx_state prev_state; unsigned long epc; u16 instr[2]; + prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { /* Calculate EPC. 
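The traps.c conversions visible here all follow one pattern: take prev_state = exception_enter() on entry, turn every early `return` into `goto out`, and call exception_exit(prev_state) at the single exit point, so context tracking is always unwound no matter how the handler bails out. A reduced sketch of that shape for a hypothetical trap handler (fixup_applies() is an assumed helper):

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

void do_example_trap(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();

        if (fixup_applies(regs))        /* assumed helper for the happy path */
                goto out;               /* never a bare "return" past this point */

        force_sig(SIGILL, current);
out:
        exception_exit(prev_state);
}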
*/ epc = exception_epc(regs); @@ -852,7 +868,7 @@ asmlinkage void do_bp(struct pt_regs *regs) goto out_sigsegv; bcode = (instr[0] >> 6) & 0x3f; do_trap_or_bp(regs, bcode, "Break"); - return; + goto out; } } else { if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) @@ -876,12 +892,12 @@ asmlinkage void do_bp(struct pt_regs *regs) switch (bcode) { case BRK_KPROBE_BP: if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) - return; + goto out; else break; case BRK_KPROBE_SSTEPBP: if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) - return; + goto out; else break; default: @@ -889,18 +905,24 @@ asmlinkage void do_bp(struct pt_regs *regs) } do_trap_or_bp(regs, bcode, "Break"); + +out: + exception_exit(prev_state); return; out_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void do_tr(struct pt_regs *regs) { u32 opcode, tcode = 0; + enum ctx_state prev_state; u16 instr[2]; unsigned long epc = msk_isa16_mode(exception_epc(regs)); + prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { if (__get_user(instr[0], (u16 __user *)(epc + 0)) || __get_user(instr[1], (u16 __user *)(epc + 2))) @@ -918,10 +940,14 @@ asmlinkage void do_tr(struct pt_regs *regs) } do_trap_or_bp(regs, tcode, "Trap"); + +out: + exception_exit(prev_state); return; out_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void do_ri(struct pt_regs *regs) @@ -929,17 +955,19 @@ asmlinkage void do_ri(struct pt_regs *regs) unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned long old_epc = regs->cp0_epc; unsigned long old31 = regs->regs[31]; + enum ctx_state prev_state; unsigned int opcode = 0; int status = -1; + prev_state = exception_enter(); if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) - return; + goto out; die_if_kernel("Reserved instruction in kernel code", regs); if (unlikely(compute_return_epc(regs) < 0)) - return; + goto out; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -974,6 +1002,9 @@ asmlinkage void do_ri(struct pt_regs *regs) regs->regs[31] = old31; force_sig(status, current); } + +out: + exception_exit(prev_state); } /* @@ -1025,21 +1056,16 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, { struct pt_regs *regs = data; - switch (action) { - default: - die_if_kernel("Unhandled kernel unaligned access or invalid " + die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " "instruction", regs); - /* Fall through */ - - case CU2_EXCEPTION: - force_sig(SIGILL, current); - } + force_sig(SIGILL, current); return NOTIFY_OK; } asmlinkage void do_cpu(struct pt_regs *regs) { + enum ctx_state prev_state; unsigned int __user *epc; unsigned long old_epc, old31; unsigned int opcode; @@ -1047,10 +1073,12 @@ asmlinkage void do_cpu(struct pt_regs *regs) int status; unsigned long __maybe_unused flags; - die_if_kernel("do_cpu invoked from kernel context!", regs); - + prev_state = exception_enter(); cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; + if (cpid != 2) + die_if_kernel("do_cpu invoked from kernel context!", regs); + switch (cpid) { case 0: epc = (unsigned int __user *)exception_epc(regs); @@ -1060,7 +1088,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) status = -1; if (unlikely(compute_return_epc(regs) < 0)) - return; + goto out; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -1093,7 +1121,7 @@ asmlinkage void do_cpu(struct 
pt_regs *regs) force_sig(status, current); } - return; + goto out; case 3: /* @@ -1131,19 +1159,26 @@ asmlinkage void do_cpu(struct pt_regs *regs) mt_ase_fp_affinity(); } - return; + goto out; case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); - return; + goto out; } force_sig(SIGILL, current); + +out: + exception_exit(prev_state); } asmlinkage void do_mdmx(struct pt_regs *regs) { + enum ctx_state prev_state; + + prev_state = exception_enter(); force_sig(SIGILL, current); + exception_exit(prev_state); } /* @@ -1151,8 +1186,10 @@ asmlinkage void do_mdmx(struct pt_regs *regs) */ asmlinkage void do_watch(struct pt_regs *regs) { + enum ctx_state prev_state; u32 cause; + prev_state = exception_enter(); /* * Clear WP (bit 22) bit of cause register so we don't loop * forever. @@ -1174,13 +1211,16 @@ asmlinkage void do_watch(struct pt_regs *regs) mips_clear_watch_registers(); local_irq_enable(); } + exception_exit(prev_state); } asmlinkage void do_mcheck(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; + enum ctx_state prev_state; + prev_state = exception_enter(); show_regs(regs); if (multi_match) { @@ -1202,6 +1242,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) panic("Caught Machine Check exception - %scaused by multiple " "matching entries in the TLB.", (multi_match) ? "" : "not "); + exception_exit(prev_state); } asmlinkage void do_mt(struct pt_regs *regs) @@ -1627,7 +1668,6 @@ void *set_vi_handler(int n, vi_handler_t addr) } extern void tlb_init(void); -extern void flush_tlb_handlers(void); /* * Timer interrupt @@ -1837,6 +1877,15 @@ void __init trap_init(void) ebase += (read_c0_ebase() & 0x3ffff000); } + if (cpu_has_mmips) { + unsigned int config3 = read_c0_config3(); + + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) + write_c0_config3(config3 | MIPS_CONF3_ISA_OE); + else + write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); + } + if (board_ebase_setup) board_ebase_setup(); per_cpu_trap_init(true); @@ -1956,7 +2005,6 @@ void __init trap_init(void) set_handler(0x080, &except_vec3_generic, 0x80); local_flush_icache_range(ebase, ebase + 0x400); - flush_tlb_handlers(); sort_extable(__start___dbe_table, __stop___dbe_table); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 203d8857070d..c369a5d35527 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -72,6 +72,7 @@ * A store crossing a page boundary might be executed only partially. * Undo the partial store in this case. */ +#include <linux/context_tracking.h> #include <linux/mm.h> #include <linux/signal.h> #include <linux/smp.h> @@ -684,7 +685,8 @@ const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; /* Recode table from 16-bit STORE register notation to 32-bit GPR. 
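The recode tables in unaligned.c translate the 3-bit register fields of 16-bit microMIPS encodings into full GPR numbers ($16, $17, $2..$7, with $0 taking the place of $16 in the store variant that follows). A tiny stand-alone example of how such a table is indexed while decoding (the field value is arbitrary):

#include <stdio.h>

/* Same mapping as the kernel tables quoted here. */
static const int reg16to32[]   = { 16, 17, 2, 3, 4, 5, 6, 7 };
static const int reg16to32st[] = {  0, 17, 2, 3, 4, 5, 6, 7 };

int main(void)
{
        unsigned int rt_field = 5;      /* arbitrary 3-bit register field */

        printf("load/ALU rt -> $%d\n", reg16to32[rt_field & 0x7]);
        printf("store    rt -> $%d\n", reg16to32st[rt_field & 0x7]);
        return 0;
}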
*/ const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; -void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) +static void emulate_load_store_microMIPS(struct pt_regs *regs, + void __user *addr) { unsigned long value; unsigned int res; @@ -1548,11 +1550,14 @@ sigill: ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } + asmlinkage void do_ade(struct pt_regs *regs) { + enum ctx_state prev_state; unsigned int __user *pc; mm_segment_t seg; + prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->cp0_badvaddr); /* @@ -1628,6 +1633,7 @@ sigbus: /* * XXX On return from the signal handler we should advance the epc */ + exception_exit(prev_state); } #ifdef CONFIG_DEBUG_FS diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c index 7726f6157d9e..cbdc4de85bb4 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -111,6 +111,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) * disable the register. */ write_c0_watchlo0(7); + back_to_back_c0_hazard(); t = read_c0_watchlo0(); write_c0_watchlo0(0); c->watch_reg_masks[0] = t & 7; @@ -121,12 +122,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 1; t = read_c0_watchhi0(); write_c0_watchhi0(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi0(); c->watch_reg_masks[0] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo1(7); + back_to_back_c0_hazard(); t = read_c0_watchlo1(); write_c0_watchlo1(0); c->watch_reg_masks[1] = t & 7; @@ -135,12 +138,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 2; t = read_c0_watchhi1(); write_c0_watchhi1(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi1(); c->watch_reg_masks[1] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo2(7); + back_to_back_c0_hazard(); t = read_c0_watchlo2(); write_c0_watchlo2(0); c->watch_reg_masks[2] = t & 7; @@ -149,12 +154,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 3; t = read_c0_watchhi2(); write_c0_watchhi2(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi2(); c->watch_reg_masks[2] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo3(7); + back_to_back_c0_hazard(); t = read_c0_watchlo3(); write_c0_watchlo3(0); c->watch_reg_masks[3] = t & 7; @@ -163,6 +170,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 4; t = read_c0_watchhi3(); write_c0_watchhi3(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi3(); c->watch_reg_masks[3] |= (t & 0xff8); if ((t & 0x80000000) == 0) diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index dd203e59e6fd..a7b044536de4 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -208,6 +208,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) return 0; } +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 9f9e875967aa..49c460370285 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -112,7 +112,7 @@ int __init plat_of_setup(void) if (!of_have_populated_dt()) panic("device tree not present"); - strncpy(of_ids[0].compatible, soc_info.compatible, + 
strlcpy(of_ids[0].compatible, soc_info.compatible, sizeof(of_ids[0].compatible)); strncpy(of_ids[1].compatible, "simple-bus", sizeof(of_ids[1].compatible)); diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index f27694fb2ad1..3b7f65cc4218 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -39,7 +39,7 @@ /* And the same for proc */ -int proc_dolasatstring(ctl_table *table, int write, +int proc_dolasatstring(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; @@ -54,7 +54,7 @@ int proc_dolasatstring(ctl_table *table, int write, } /* proc function to write EEPROM after changing int entry */ -int proc_dolasatint(ctl_table *table, int write, +int proc_dolasatint(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; @@ -72,7 +72,7 @@ int proc_dolasatint(ctl_table *table, int write, static int rtctmp; /* proc function to read/write RealTime Clock */ -int proc_dolasatrtc(ctl_table *table, int write, +int proc_dolasatrtc(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct timespec ts; @@ -97,7 +97,7 @@ int proc_dolasatrtc(ctl_table *table, int write, #endif #ifdef CONFIG_INET -int proc_lasat_ip(ctl_table *table, int write, +int proc_lasat_ip(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned int ip; @@ -157,7 +157,7 @@ int proc_lasat_ip(ctl_table *table, int write, } #endif -int proc_lasat_prid(ctl_table *table, int write, +int proc_lasat_prid(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; @@ -176,7 +176,7 @@ int proc_lasat_prid(ctl_table *table, int write, extern int lasat_boot_to_service; -static ctl_table lasat_table[] = { +static struct ctl_table lasat_table[] = { { .procname = "cpu-hz", .data = &lasat_board_info.li_cpu_hz, @@ -262,7 +262,7 @@ static ctl_table lasat_table[] = { {} }; -static ctl_table lasat_root_table[] = { +static struct ctl_table lasat_root_table[] = { { .procname = "lasat", .mode = 0555, diff --git a/arch/mips/loongson/common/cs5536/cs5536_isa.c b/arch/mips/loongson/common/cs5536/cs5536_isa.c index a6eb2e853d94..924be39e7733 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_isa.c +++ b/arch/mips/loongson/common/cs5536/cs5536_isa.c @@ -13,6 +13,7 @@ * option) any later version. */ +#include <linux/pci.h> #include <cs5536/cs5536.h> #include <cs5536/cs5536_pci.h> @@ -314,3 +315,16 @@ u32 pci_isa_read_reg(int reg) return conf_data; } + +/* + * The mfgpt timer interrupt is running early, so we must keep the south bridge + * mmio always enabled. Otherwise we may race with the PCI configuration which + * may temporarily disable it. When that happens and the timer interrupt fires, + * we are not able to clear it and the system will hang. 
+ */ +static void cs5536_isa_mmio_always_on(struct pci_dev *dev) +{ + dev->mmio_always_on = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, + PCI_CLASS_BRIDGE_ISA, 8, cs5536_isa_mmio_always_on); diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c index bc739d4bab2e..4dc2f5fa3f67 100644 --- a/arch/mips/loongson/lemote-2f/clock.c +++ b/arch/mips/loongson/lemote-2f/clock.c @@ -121,7 +121,8 @@ int clk_set_rate(struct clk *clk, unsigned long rate) clk->rate = rate; regval = LOONGSON_CHIPCFG0; - regval = (regval & ~0x7) | (loongson2_clockmod_table[i].index - 1); + regval = (regval & ~0x7) | + (loongson2_clockmod_table[i].driver_data - 1); LOONGSON_CHIPCFG0 = regval; return ret; diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index f03771900813..e773659ccf9f 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -471,6 +471,9 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, unsigned int fcr31; unsigned int bit; + if (!cpu_has_mmips) + return 0; + switch (insn.mm_i_format.opcode) { case mm_pool32a_op: if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index e87aae1f2e80..7f4f93ab22b7 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -4,7 +4,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \ gup.o init.o mmap.o page.o page-funcs.o \ - tlbex.o tlbex-fault.o uasm-mips.o + tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c index 576add33bf5b..ee5c1ff861ae 100644 --- a/arch/mips/mm/cerr-sb1.c +++ b/arch/mips/mm/cerr-sb1.c @@ -182,11 +182,7 @@ asmlinkage void sb1_cache_error(void) #ifdef CONFIG_SIBYTE_BW_TRACE /* Freeze the trace buffer now */ -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) - csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); -#else csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); -#endif printk("Trace buffer frozen\n"); #endif diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index caf92ecb37d6..aaccf1c10699 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -246,6 +246,9 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg, if (!plat_device_is_coherent(dev)) __dma_sync(sg_page(sg), sg->offset, sg->length, direction); +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->dma_length = sg->length; +#endif sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) + sg->offset; } diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 0fead53d1c26..85df1cd8d446 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -5,6 +5,7 @@ * * Copyright (C) 1995 - 2000 by Ralf Baechle */ +#include <linux/context_tracking.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> @@ -32,8 +33,8 @@ * and the problem, and then passes it off to one of the appropriate * routines. 
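The CS5536 hunk above keeps the south-bridge MMIO decode permanently enabled by registering an early PCI class fixup that sets dev->mmio_always_on before the PCI core sizes the BARs. A minimal sketch of the same mechanism for a hypothetical ISA-bridge device (the vendor/device IDs are placeholders):

#include <linux/pci.h>

/* Early fixups run before BARs are sized, so decode is never turned off. */
static void example_mmio_always_on(struct pci_dev *dev)
{
        dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1234 /* placeholder vendor */,
                              0x5678 /* placeholder device */,
                              PCI_CLASS_BRIDGE_ISA, 8,
                              example_mmio_always_on);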
*/ -asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write, - unsigned long address) +static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, + unsigned long address) { struct vm_area_struct * vma = NULL; struct task_struct *tsk = current; @@ -312,3 +313,13 @@ vmalloc_fault: } #endif } + +asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address) +{ + enum ctx_state prev_state; + + prev_state = exception_enter(); + __do_page_fault(regs, write, address); + exception_exit(prev_state); +} diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 9b973e0af9cb..4e73f10a7519 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -359,11 +359,24 @@ void __init paging_init(void) static struct kcore_list kcore_kseg0; #endif -void __init mem_init(void) +static inline void mem_init_free_highmem(void) { - unsigned long codesize, reservedpages, datasize, initsize; - unsigned long tmp, ram; +#ifdef CONFIG_HIGHMEM + unsigned long tmp; + for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { + struct page *page = pfn_to_page(tmp); + + if (!page_is_ram(tmp)) + SetPageReserved(page); + else + free_highmem_page(page); + } +#endif +} + +void __init mem_init(void) +{ #ifdef CONFIG_HIGHMEM #ifdef CONFIG_DISCONTIGMEM #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" @@ -374,34 +387,10 @@ void __init mem_init(void) #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - totalram_pages += free_all_bootmem(); + free_all_bootmem(); setup_zero_pages(); /* Setup zeroed pages. */ - - reservedpages = ram = 0; - for (tmp = 0; tmp < max_low_pfn; tmp++) - if (page_is_ram(tmp) && pfn_valid(tmp)) { - ram++; - if (PageReserved(pfn_to_page(tmp))) - reservedpages++; - } - num_physpages = ram; - -#ifdef CONFIG_HIGHMEM - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - if (!page_is_ram(tmp)) { - SetPageReserved(page); - continue; - } - free_highmem_page(page); - } - num_physpages += totalhigh_pages; -#endif - - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + mem_init_free_highmem(); + mem_init_print_info(NULL); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) @@ -410,16 +399,6 @@ void __init mem_init(void) kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4, KCORE_TEXT); #endif - - printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " - "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", - nr_free_pages() << (PAGE_SHIFT-10), - ram << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10, - totalhigh_pages << (PAGE_SHIFT-10)); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ @@ -440,7 +419,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 7e5fe2790d8a..f1baadd56e82 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 
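The fault.c hunk takes the opposite approach to the goto-out conversions in traps.c: the existing body becomes a static __do_page_fault() and a thin do_page_fault() wrapper adds the context-tracking bracket, so none of the body's many return paths needs touching. The same wrapper shape, sketched for a hypothetical handler:

#include <linux/context_tracking.h>
#include <linux/ptrace.h>

static void __do_example_fault(struct pt_regs *regs, unsigned long address)
{
        /* original handler body, free to return from anywhere */
}

void do_example_fault(struct pt_regs *regs, unsigned long address)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        __do_example_fault(regs, address);
        exception_exit(prev_state);
}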
mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 4eb8dcfaf1ce..2c0bd580b9da 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -232,7 +232,7 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off) uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); } - } + } } extern u32 __clear_page_start; diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S new file mode 100644 index 000000000000..30a494db99c2 --- /dev/null +++ b/arch/mips/mm/tlb-funcs.S @@ -0,0 +1,37 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Micro-assembler generated tlb handler functions. + * + * Copyright (C) 2013 Broadcom Corporation. + * + * Based on mm/page-funcs.c + * Copyright (C) 2012 MIPS Technologies, Inc. + * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org> + */ +#include <asm/asm.h> +#include <asm/regdef.h> + +#define FASTPATH_SIZE 128 + +LEAF(tlbmiss_handler_setup_pgd) + .space 16 * 4 +END(tlbmiss_handler_setup_pgd) +EXPORT(tlbmiss_handler_setup_pgd_end) + +LEAF(handle_tlbm) + .space FASTPATH_SIZE * 4 +END(handle_tlbm) +EXPORT(handle_tlbm_end) + +LEAF(handle_tlbs) + .space FASTPATH_SIZE * 4 +END(handle_tlbs) +EXPORT(handle_tlbs_end) + +LEAF(handle_tlbl) + .space FASTPATH_SIZE * 4 +END(handle_tlbl) +EXPORT(handle_tlbl_end) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index afeef93f81a7..9ab0f907a52c 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -305,6 +305,17 @@ static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; +static inline int __maybe_unused c0_kscratch(void) +{ + switch (current_cpu_type()) { + case CPU_XLP: + case CPU_XLR: + return 22; + default: + return 31; + } +} + static int __cpuinit allocate_kscratch(void) { int r; @@ -334,9 +345,9 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) int smp_processor_id_sel; int smp_processor_id_shift; - if (scratch_reg > 0) { + if (scratch_reg >= 0) { /* Save in CPU local C0_KScratch? */ - UASM_i_MTC0(p, 1, 31, scratch_reg); + UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); r.r1 = K0; r.r2 = K1; r.r3 = 1; @@ -384,8 +395,8 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) static void __cpuinit build_restore_work_registers(u32 **p) { - if (scratch_reg > 0) { - UASM_i_MFC0(p, 1, 31, scratch_reg); + if (scratch_reg >= 0) { + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); return; } /* K0 already points to save area, restore $1 and $2 */ @@ -673,8 +684,8 @@ static __cpuinit void build_restore_pagemask(u32 **p, uasm_i_mtc0(p, 0, C0_PAGEMASK); uasm_il_b(p, r, lid); } - if (scratch_reg > 0) - UASM_i_MFC0(p, 1, 31, scratch_reg); + if (scratch_reg >= 0) + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); else UASM_i_LW(p, 1, scratchpad_offset(0), 0); } else { @@ -817,7 +828,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, #ifdef CONFIG_MIPS_PGD_C0_CONTEXT if (pgd_reg != -1) { /* pgd is in pgd_reg */ - UASM_i_MFC0(p, ptr, 31, pgd_reg); + UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); } else { /* * &pgd << 11 stored in CONTEXT [23..63]. 
@@ -929,8 +940,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, uasm_i_jr(p, ptr); if (mode == refill_scratch) { - if (scratch_reg > 0) - UASM_i_MFC0(p, 1, 31, scratch_reg); + if (scratch_reg >= 0) + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); else UASM_i_LW(p, 1, scratchpad_offset(0), 0); } else { @@ -961,7 +972,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) uasm_i_srl(p, ptr, ptr, 19); #else /* - * smp_processor_id() << 3 is stored in CONTEXT. + * smp_processor_id() << 2 is stored in CONTEXT. */ uasm_i_mfc0(p, ptr, C0_CONTEXT); UASM_i_LA_mostly(p, tmp, pgdc); @@ -1096,7 +1107,7 @@ struct mips_huge_tlb_info { static struct mips_huge_tlb_info __cpuinit build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, struct uasm_reloc **r, unsigned int tmp, - unsigned int ptr, int c0_scratch) + unsigned int ptr, int c0_scratch_reg) { struct mips_huge_tlb_info rv; unsigned int even, odd; @@ -1110,12 +1121,12 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_MFC0(p, tmp, C0_BADVADDR); if (pgd_reg != -1) - UASM_i_MFC0(p, ptr, 31, pgd_reg); + UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); else UASM_i_MFC0(p, ptr, C0_CONTEXT); - if (c0_scratch >= 0) - UASM_i_MTC0(p, scratch, 31, c0_scratch); + if (c0_scratch_reg >= 0) + UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); else UASM_i_SW(p, scratch, scratchpad_offset(0), 0); @@ -1130,14 +1141,14 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, } } else { if (pgd_reg != -1) - UASM_i_MFC0(p, ptr, 31, pgd_reg); + UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); else UASM_i_MFC0(p, ptr, C0_CONTEXT); UASM_i_MFC0(p, tmp, C0_BADVADDR); - if (c0_scratch >= 0) - UASM_i_MTC0(p, scratch, 31, c0_scratch); + if (c0_scratch_reg >= 0) + UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); else UASM_i_SW(p, scratch, scratchpad_offset(0), 0); @@ -1242,8 +1253,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, } UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ - if (c0_scratch >= 0) { - UASM_i_MFC0(p, scratch, 31, c0_scratch); + if (c0_scratch_reg >= 0) { + UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); rv.restore_scratch = 1; @@ -1286,7 +1297,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) memset(relocs, 0, sizeof(relocs)); memset(final_handler, 0, sizeof(final_handler)); - if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { + if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, scratch_reg); vmalloc_mode = refill_scratch; @@ -1444,27 +1455,25 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); } -/* - * 128 instructions for the fastpath handler is generous and should - * never be exceeded. 
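With tlb-funcs.S (added earlier in this diff) reserving the handler space via `.space` directives between start and end symbols, the generators no longer hard-code FASTPATH_SIZE; they derive the available room by pointer subtraction and bound the emit pointer against the end symbol, as the hunks that follow show. A rough sketch of that bookkeeping (the emit step itself is elided):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Start and one-past-end of the space reserved with ".space" in tlb-funcs.S. */
extern u32 handle_tlbl[], handle_tlbl_end[];

static void fill_handle_tlbl(void)
{
        u32 *p = handle_tlbl;
        const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; /* in words */

        memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));

        /* ... emit instructions at *p++ with the uasm helpers ... */

        if (p >= handle_tlbl_end)
                panic("TLB load handler fastpath space exceeded");
}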
- */ -#define FASTPATH_SIZE 128 +extern u32 handle_tlbl[], handle_tlbl_end[]; +extern u32 handle_tlbs[], handle_tlbs_end[]; +extern u32 handle_tlbm[], handle_tlbm_end[]; -u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; -u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; -u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; #ifdef CONFIG_MIPS_PGD_C0_CONTEXT -u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned; +extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; static void __cpuinit build_r4000_setup_pgd(void) { const int a0 = 4; const int a1 = 5; u32 *p = tlbmiss_handler_setup_pgd_array; + const int tlbmiss_handler_setup_pgd_size = + tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array)); + memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size * + sizeof(tlbmiss_handler_setup_pgd[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1490,17 +1499,17 @@ static void __cpuinit build_r4000_setup_pgd(void) } else { /* PGD in c0_KScratch */ uasm_i_jr(&p, 31); - UASM_i_MTC0(&p, a0, 31, pgd_reg); + UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); } - if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)) - panic("tlbmiss_handler_setup_pgd_array space exceeded"); + if (p >= tlbmiss_handler_setup_pgd_end) + panic("tlbmiss_handler_setup_pgd space exceeded"); + uasm_resolve_relocs(relocs, labels); - pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n", - (unsigned int)(p - tlbmiss_handler_setup_pgd_array)); + pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", + (unsigned int)(p - tlbmiss_handler_setup_pgd)); - dump_handler("tlbmiss_handler", - tlbmiss_handler_setup_pgd_array, - ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)); + dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, + tlbmiss_handler_setup_pgd_size); } #endif @@ -1745,10 +1754,11 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, static void __cpuinit build_r3000_tlb_load_handler(void) { u32 *p = handle_tlbl; + const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(handle_tlbl, 0, sizeof(handle_tlbl)); + memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1762,23 +1772,24 @@ static void __cpuinit build_r3000_tlb_load_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbl) > FASTPATH_SIZE) + if (p >= handle_tlbl_end) panic("TLB load handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); - dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); + dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); } static void __cpuinit build_r3000_tlb_store_handler(void) { u32 *p = handle_tlbs; + const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(handle_tlbs, 0, sizeof(handle_tlbs)); + memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1792,23 +1803,24 @@ static void __cpuinit build_r3000_tlb_store_handler(void) uasm_i_j(&p, 
(unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbs) > FASTPATH_SIZE) + if (p >= handle_tlbs) panic("TLB store handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); - dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); + dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); } static void __cpuinit build_r3000_tlb_modify_handler(void) { u32 *p = handle_tlbm; + const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(handle_tlbm, 0, sizeof(handle_tlbm)); + memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1822,14 +1834,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbm) > FASTPATH_SIZE) + if (p >= handle_tlbm_end) panic("TLB modify handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); - dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); + dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size); } #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ @@ -1893,11 +1905,12 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, static void __cpuinit build_r4000_tlb_load_handler(void) { u32 *p = handle_tlbl; + const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; - memset(handle_tlbl, 0, sizeof(handle_tlbl)); + memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1935,6 +1948,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_nop(&p); uasm_i_tlbr(&p); + + switch (current_cpu_type()) { + default: + if (cpu_has_mips_r2) { + uasm_i_ehb(&p); + + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + break; + } + } + /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); @@ -1989,6 +2015,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_nop(&p); uasm_i_tlbr(&p); + + switch (current_cpu_type()) { + default: + if (cpu_has_mips_r2) { + uasm_i_ehb(&p); + + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + break; + } + } + /* Examine entrylo 0 or 1 based on ptr. 
*/ if (use_bbit_insns()) { uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); @@ -2036,24 +2075,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbl) > FASTPATH_SIZE) + if (p >= handle_tlbl_end) panic("TLB load handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); - dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); + dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size); } static void __cpuinit build_r4000_tlb_store_handler(void) { u32 *p = handle_tlbs; + const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; - memset(handle_tlbs, 0, sizeof(handle_tlbs)); + memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -2090,24 +2130,25 @@ static void __cpuinit build_r4000_tlb_store_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbs) > FASTPATH_SIZE) + if (p >= handle_tlbs_end) panic("TLB store handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); - dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); + dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size); } static void __cpuinit build_r4000_tlb_modify_handler(void) { u32 *p = handle_tlbm; + const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; - memset(handle_tlbm, 0, sizeof(handle_tlbm)); + memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -2145,14 +2186,28 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbm) > FASTPATH_SIZE) + if (p >= handle_tlbm_end) panic("TLB modify handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); - dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); + dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size); +} + +static void __cpuinit flush_tlb_handlers(void) +{ + local_flush_icache_range((unsigned long)handle_tlbl, + (unsigned long)handle_tlbl_end); + local_flush_icache_range((unsigned long)handle_tlbs, + (unsigned long)handle_tlbs_end); + local_flush_icache_range((unsigned long)handle_tlbm, + (unsigned long)handle_tlbm_end); +#ifdef CONFIG_MIPS_PGD_C0_CONTEXT + local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, + (unsigned long)tlbmiss_handler_setup_pgd_end); +#endif } void __cpuinit build_tlb_refill_handler(void) @@ -2187,6 +2242,7 @@ void __cpuinit build_tlb_refill_handler(void) build_r3000_tlb_load_handler(); build_r3000_tlb_store_handler(); build_r3000_tlb_modify_handler(); + flush_tlb_handlers(); run_once++; } #else @@ -2214,23 +2270,10 @@ void __cpuinit build_tlb_refill_handler(void) build_r4000_tlb_modify_handler(); if (!cpu_has_local_ebase) build_r4000_tlb_refill_handler(); + flush_tlb_handlers(); run_once++; } if 
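The rewritten malta-reset.c (and the matching sead3-reset.c below) reduces reboot to a single magic write into a memory-mapped soft-reset register; the SOFTRES_REG and GORESET values are the Malta ones defined just below. A hedged sketch of that pattern, assuming an ioremap()-based mapping as in the original driver:

#include <linux/io.h>

#define SOFTRES_REG     0x1f000500      /* Malta soft-reset register (physical) */
#define GORESET         0x42            /* magic "go reset" value               */

static void example_machine_restart(void)
{
        unsigned int __iomem *softres_reg =
                ioremap(SOFTRES_REG, sizeof(unsigned int));

        if (softres_reg)
                __raw_writel(GORESET, softres_reg);
}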
(cpu_has_local_ebase) build_r4000_tlb_refill_handler(); } } - -void __cpuinit flush_tlb_handlers(void) -{ - local_flush_icache_range((unsigned long)handle_tlbl, - (unsigned long)handle_tlbl + sizeof(handle_tlbl)); - local_flush_icache_range((unsigned long)handle_tlbs, - (unsigned long)handle_tlbs + sizeof(handle_tlbs)); - local_flush_icache_range((unsigned long)handle_tlbm, - (unsigned long)handle_tlbm + sizeof(handle_tlbm)); -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT - local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array, - (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm)); -#endif -} diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 0388fc8b5613..72fdedbf76db 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -10,7 +10,6 @@ obj-y := malta-amon.o malta-display.o malta-init.o \ malta-reset.o malta-setup.o malta-time.o obj-$(CONFIG_EARLY_PRINTK) += malta-console.o -obj-$(CONFIG_PCI) += malta-pci.o # FIXME FIXME FIXME obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 0a1339ac3ec8..c69da3734699 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -422,8 +422,10 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { */ int __init gcmp_probe(unsigned long addr, unsigned long size) { - if (mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) { + if ((mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) && + (mips_revision_sconid != MIPS_REVISION_SCON_GT64120)) { gcmp_present = 0; + pr_debug("GCMP NOT present\n"); return gcmp_present; } diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c index 329420536241..d627d4b2b47f 100644 --- a/arch/mips/mti-malta/malta-reset.c +++ b/arch/mips/mti-malta/malta-reset.c @@ -1,33 +1,18 @@ /* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * Reset the MIPS boards. 
- * */ -#include <linux/init.h> +#include <linux/io.h> #include <linux/pm.h> -#include <asm/io.h> #include <asm/reboot.h> -#include <asm/mips-boards/generic.h> + +#define SOFTRES_REG 0x1f000500 +#define GORESET 0x42 static void mips_machine_restart(char *command) { @@ -45,7 +30,6 @@ static void mips_machine_halt(void) __raw_writel(GORESET, softres_reg); } - static int __init mips_reboot_setup(void) { _machine_restart = mips_machine_restart; @@ -54,5 +38,4 @@ static int __init mips_reboot_setup(void) return 0; } - arch_initcall(mips_reboot_setup); diff --git a/arch/mips/mti-sead3/sead3-reset.c b/arch/mips/mti-sead3/sead3-reset.c index 20475c5e7b9c..e6fb24414a70 100644 --- a/arch/mips/mti-sead3/sead3-reset.c +++ b/arch/mips/mti-sead3/sead3-reset.c @@ -9,7 +9,9 @@ #include <linux/pm.h> #include <asm/reboot.h> -#include <asm/mips-boards/generic.h> + +#define SOFTRES_REG 0x1f000050 +#define GORESET 0x4d static void mips_machine_restart(char *command) { @@ -35,5 +37,4 @@ static int __init mips_reboot_setup(void) return 0; } - arch_initcall(mips_reboot_setup); diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig index e0873a31ebaa..2447bf97d35a 100644 --- a/arch/mips/netlogic/Kconfig +++ b/arch/mips/netlogic/Kconfig @@ -51,4 +51,15 @@ endif config NLM_COMMON bool +config IOMMU_HELPER + bool + +config NEED_SG_DMA_LENGTH + bool + +config SWIOTLB + def_bool y + select NEED_SG_DMA_LENGTH + select IOMMU_HELPER + endif diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile index 291372a086f5..362739d62b1d 100644 --- a/arch/mips/netlogic/common/Makefile +++ b/arch/mips/netlogic/common/Makefile @@ -1,3 +1,5 @@ obj-y += irq.o time.o +obj-y += nlm-dma.o +obj-y += reset.o obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_EARLY_PRINTK) += earlycons.o diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index 9f84c60bf535..73facb2b33bb 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -253,13 +253,12 @@ asmlinkage void plat_irq_dispatch(void) node = nlm_nodeid(); eirr = read_c0_eirr_and_eimr(); - - i = __ilog2_u64(eirr); - if (i == -1) + if (eirr == 0) return; + i = __ffs64(eirr); /* per-CPU IRQs don't need translation */ - if (eirr & PERCPU_IRQ_MASK) { + if (i < PIC_IRQ_BASE) { do_IRQ(i); return; } diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c new file mode 100644 index 000000000000..f3d4ae87abc7 --- /dev/null +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -0,0 +1,107 @@ +/* +* Copyright (C) 2003-2013 Broadcom Corporation +* All Rights Reserved + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the Broadcom + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> +#include <linux/bootmem.h> +#include <linux/export.h> +#include <linux/swiotlb.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/mm.h> + +#include <asm/bootinfo.h> + +static char *nlm_swiotlb; + +static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) +{ + void *ret; + + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) + return ret; + + /* ignore region specifiers */ + gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); + +#ifdef CONFIG_ZONE_DMA32 + if (dev->coherent_dma_mask <= DMA_BIT_MASK(32)) + gfp |= __GFP_DMA32; +#endif + + /* Don't invoke OOM killer */ + gfp |= __GFP_NORETRY; + + return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); +} + +static void nlm_dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) +{ + int order = get_order(size); + + if (dma_release_from_coherent(dev, order, vaddr)) + return; + + swiotlb_free_coherent(dev, size, vaddr, dma_handle); +} + +struct dma_map_ops nlm_swiotlb_dma_ops = { + .alloc = nlm_dma_alloc_coherent, + .free = nlm_dma_free_coherent, + .map_page = swiotlb_map_page, + .unmap_page = swiotlb_unmap_page, + .map_sg = swiotlb_map_sg_attrs, + .unmap_sg = swiotlb_unmap_sg_attrs, + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, + .sync_single_for_device = swiotlb_sync_single_for_device, + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = swiotlb_sync_sg_for_device, + .mapping_error = swiotlb_dma_mapping_error, + .dma_supported = swiotlb_dma_supported +}; + +void __init plat_swiotlb_setup(void) +{ + size_t swiotlbsize; + unsigned long swiotlb_nslabs; + + swiotlbsize = 1 << 20; /* 1 MB for now */ + swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; + swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE); + swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT; + + nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize); + swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1); +} diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S new file mode 100644 index 000000000000..adb18288a6c0 --- /dev/null +++ b/arch/mips/netlogic/common/reset.S @@ -0,0 +1,230 @@ +/* + * Copyright 2003-2013 Broadcom Corporation. + * All Rights Reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the Broadcom + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/init.h> + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/regdef.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/asmmacro.h> +#include <asm/addrspace.h> + +#include <asm/netlogic/common.h> + +#include <asm/netlogic/xlp-hal/iomap.h> +#include <asm/netlogic/xlp-hal/xlp.h> +#include <asm/netlogic/xlp-hal/sys.h> +#include <asm/netlogic/xlp-hal/cpucontrol.h> + +#define CP0_EBASE $15 +#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \ + XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \ + SYS_CPU_NONCOHERENT_MODE * 4 + +/* Enable XLP features and workarounds in the LSU */ +.macro xlp_config_lsu + li t0, LSU_DEFEATURE + mfcr t1, t0 + + lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */ + or t1, t1, t2 + mtcr t1, t0 + + li t0, ICU_DEFEATURE + mfcr t1, t0 + ori t1, 0x1000 /* Enable Icache partitioning */ + mtcr t1, t0 + + li t0, SCHED_DEFEATURE + lui t1, 0x0100 /* Disable BRU accepting ALU ops */ + mtcr t1, t0 +.endm + +/* + * Low level flush for L1D cache on XLP, the normal cache ops does + * not do the complete and correct cache flush. + */ +.macro xlp_flush_l1_dcache + li t0, LSU_DEBUG_DATA0 + li t1, LSU_DEBUG_ADDR + li t2, 0 /* index */ + li t3, 0x1000 /* loop count */ +1: + sll v0, t2, 5 + mtcr zero, t0 + ori v1, v0, 0x3 /* way0 | write_enable | write_active */ + mtcr v1, t1 +2: + mfcr v1, t1 + andi v1, 0x1 /* wait for write_active == 0 */ + bnez v1, 2b + nop + mtcr zero, t0 + ori v1, v0, 0x7 /* way1 | write_enable | write_active */ + mtcr v1, t1 +3: + mfcr v1, t1 + andi v1, 0x1 /* wait for write_active == 0 */ + bnez v1, 3b + nop + addi t2, 1 + bne t3, t2, 1b + nop +.endm + +/* + * nlm_reset_entry will be copied to the reset entry point for + * XLR and XLP. The XLP cores start here when they are woken up. This + * is also the NMI entry point. + * + * We use scratch reg 6/7 to save k0/k1 and check for NMI first. 
+ * + * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS + * location, this will have the thread mask (used when core is woken up) + * and the current NMI handler in case we reached here for an NMI. + * + * When a core or thread is newly woken up, it marks itself ready and + * loops in a 'wait'. When the CPU really needs waking up, we send an NMI + * IPI to it, with the NMI handler set to prom_boot_secondary_cpus + */ + .set noreorder + .set noat + .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ + +FEXPORT(nlm_reset_entry) + dmtc0 k0, $22, 6 + dmtc0 k1, $22, 7 + mfc0 k0, CP0_STATUS + li k1, 0x80000 + and k1, k0, k1 + beqz k1, 1f /* go to real reset entry */ + nop + li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */ + ld k0, BOOT_NMI_HANDLER(k1) + jr k0 + nop + +1: /* Entry point on core wakeup */ + mfc0 t0, CP0_EBASE, 1 + mfc0 t1, CP0_EBASE, 1 + srl t1, 5 + andi t1, 0x3 /* t1 <- node */ + li t2, 0x40000 + mul t3, t2, t1 /* t3 = node * 0x40000 */ + srl t0, t0, 2 + and t0, t0, 0x7 /* t0 <- core */ + li t1, 0x1 + sll t0, t1, t0 + nor t0, t0, zero /* t0 <- ~(1 << core) */ + li t2, SYS_CPU_COHERENT_BASE(0) + add t2, t2, t3 /* t2 <- SYS offset for node */ + lw t1, 0(t2) + and t1, t1, t0 + sw t1, 0(t2) + + /* read back to ensure complete */ + lw t1, 0(t2) + sync + + /* Configure LSU on Non-0 Cores. */ + xlp_config_lsu + /* FALL THROUGH */ + +/* + * Wake up sibling threads from the initial thread in + * a core. + */ +EXPORT(nlm_boot_siblings) + /* core L1D flush before enable threads */ + xlp_flush_l1_dcache + /* Enable hw threads by writing to MAP_THREADMODE of the core */ + li t0, CKSEG1ADDR(RESET_DATA_PHYS) + lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ + li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE) + mfcr t2, t0 + or t2, t2, t1 + mtcr t2, t0 + + /* + * The new hardware thread starts at the next instruction + * For all the cases other than core 0 thread 0, we will + * jump to the secondary wait function. + */ + mfc0 v0, CP0_EBASE, 1 + andi v0, 0x3ff /* v0 <- node/core */ + + beqz v0, 4f /* boot cpu (cpuid == 0)? 
*/ + nop + + /* setup status reg */ + move t1, zero +#ifdef CONFIG_64BIT + ori t1, ST0_KX +#endif + mtc0 t1, CP0_STATUS + + /* mark CPU ready, careful here, previous mtcr trashed registers */ + li t3, CKSEG1ADDR(RESET_DATA_PHYS) + ADDIU t1, t3, BOOT_CPU_READY + sll v1, v0, 2 + PTR_ADDU t1, v1 + li t2, 1 + sw t2, 0(t1) + /* Wait until NMI hits */ +3: wait + b 3b + nop + + /* + * For the boot CPU, we have to restore registers and + * return + */ +4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */ + li t1, 0xfadebeef + dmtc0 t1, $4, 2 /* restore SP from UserLocal */ + PTR_SUBU sp, t0, PT_SIZE + RESTORE_ALL + jr ra + nop +EXPORT(nlm_reset_entry_end) + +LEAF(nlm_init_boot_cpu) +#ifdef CONFIG_CPU_XLP + xlp_config_lsu +#endif + jr ra + nop +END(nlm_init_boot_cpu) diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index ffba52489bef..885d293b61da 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -145,7 +145,6 @@ void nlm_cpus_done(void) * Boot all other cpus in the system, initialize them, and bring them into * the boot function */ -int nlm_cpu_ready[NR_CPUS]; unsigned long nlm_next_gp; unsigned long nlm_next_sp; static cpumask_t phys_cpu_present_mask; @@ -168,6 +167,7 @@ void __init nlm_smp_setup(void) { unsigned int boot_cpu; int num_cpus, i, ncore; + volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY); char buf[64]; boot_cpu = hard_smp_processor_id(); @@ -181,10 +181,10 @@ void __init nlm_smp_setup(void) num_cpus = 1; for (i = 0; i < NR_CPUS; i++) { /* - * nlm_cpu_ready array is not set for the boot_cpu, + * cpu_ready array is not set for the boot_cpu, * it is only set for ASPs (see smpboot.S) */ - if (nlm_cpu_ready[i]) { + if (cpu_ready[i]) { cpumask_set_cpu(i, &phys_cpu_present_mask); __cpu_number_map[i] = num_cpus; __cpu_logical_map[num_cpus] = i; @@ -254,21 +254,15 @@ unsupp: int __cpuinit nlm_wakeup_secondary_cpus(void) { - unsigned long reset_vec; - char *reset_data; + u32 *reset_data; int threadmode; - /* Update reset entry point with CPU init code */ - reset_vec = CKSEG1ADDR(RESET_VEC_PHYS); - memcpy((void *)reset_vec, (void *)nlm_reset_entry, - (nlm_reset_entry_end - nlm_reset_entry)); - /* verify the mask and setup core config variables */ threadmode = nlm_parse_cpumask(&nlm_cpumask); /* Setup CPU init parameters */ - reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS); - *(int *)(reset_data + BOOT_THREAD_MODE) = threadmode; + reset_data = nlm_get_boot_data(BOOT_THREAD_MODE); + *reset_data = threadmode; #ifdef CONFIG_CPU_XLP xlp_wakeup_secondary_cpus(); diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S index 026517488584..528c46c5a170 100644 --- a/arch/mips/netlogic/common/smpboot.S +++ b/arch/mips/netlogic/common/smpboot.S @@ -50,197 +50,12 @@ #include <asm/netlogic/xlp-hal/cpucontrol.h> #define CP0_EBASE $15 -#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \ - XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \ - SYS_CPU_NONCOHERENT_MODE * 4 - -#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */ - -/* Enable XLP features and workarounds in the LSU */ -.macro xlp_config_lsu - li t0, LSU_DEFEATURE - mfcr t1, t0 - - lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */ - or t1, t1, t2 -#ifdef XLP_AX_WORKAROUND - li t2, ~0xe /* S1RCM */ - and t1, t1, t2 -#endif - mtcr t1, t0 - - li t0, ICU_DEFEATURE - mfcr t1, t0 - ori t1, 0x1000 /* Enable Icache partitioning */ - mtcr t1, t0 - - -#ifdef XLP_AX_WORKAROUND - li t0, SCHED_DEFEATURE - lui t1, 0x0100 /* 
Disable BRU accepting ALU ops */ - mtcr t1, t0 -#endif -.endm - -/* - * This is the code that will be copied to the reset entry point for - * XLR and XLP. The XLP cores start here when they are woken up. This - * is also the NMI entry point. - */ -.macro xlp_flush_l1_dcache - li t0, LSU_DEBUG_DATA0 - li t1, LSU_DEBUG_ADDR - li t2, 0 /* index */ - li t3, 0x1000 /* loop count */ -1: - sll v0, t2, 5 - mtcr zero, t0 - ori v1, v0, 0x3 /* way0 | write_enable | write_active */ - mtcr v1, t1 -2: - mfcr v1, t1 - andi v1, 0x1 /* wait for write_active == 0 */ - bnez v1, 2b - nop - mtcr zero, t0 - ori v1, v0, 0x7 /* way1 | write_enable | write_active */ - mtcr v1, t1 -3: - mfcr v1, t1 - andi v1, 0x1 /* wait for write_active == 0 */ - bnez v1, 3b - nop - addi t2, 1 - bne t3, t2, 1b - nop -.endm - -/* - * The cores can come start when they are woken up. This is also the NMI - * entry, so check that first. - * - * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS - * location, this will have the thread mask (used when core is woken up) - * and the current NMI handler in case we reached here for an NMI. - * - * When a core or thread is newly woken up, it loops in a 'wait'. When - * the CPU really needs waking up, we send an NMI to it, with the NMI - * handler set to prom_boot_secondary_cpus - */ .set noreorder .set noat - .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ - -FEXPORT(nlm_reset_entry) - dmtc0 k0, $22, 6 - dmtc0 k1, $22, 7 - mfc0 k0, CP0_STATUS - li k1, 0x80000 - and k1, k0, k1 - beqz k1, 1f /* go to real reset entry */ - nop - li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */ - ld k0, BOOT_NMI_HANDLER(k1) - jr k0 - nop - -1: /* Entry point on core wakeup */ - mfc0 t0, CP0_EBASE, 1 - mfc0 t1, CP0_EBASE, 1 - srl t1, 5 - andi t1, 0x3 /* t1 <- node */ - li t2, 0x40000 - mul t3, t2, t1 /* t3 = node * 0x40000 */ - srl t0, t0, 2 - and t0, t0, 0x7 /* t0 <- core */ - li t1, 0x1 - sll t0, t1, t0 - nor t0, t0, zero /* t0 <- ~(1 << core) */ - li t2, SYS_CPU_COHERENT_BASE(0) - add t2, t2, t3 /* t2 <- SYS offset for node */ - lw t1, 0(t2) - and t1, t1, t0 - sw t1, 0(t2) - - /* read back to ensure complete */ - lw t1, 0(t2) - sync - - /* Configure LSU on Non-0 Cores. */ - xlp_config_lsu - /* FALL THROUGH */ - -/* - * Wake up sibling threads from the initial thread in - * a core. - */ -EXPORT(nlm_boot_siblings) - /* core L1D flush before enable threads */ - xlp_flush_l1_dcache - /* Enable hw threads by writing to MAP_THREADMODE of the core */ - li t0, CKSEG1ADDR(RESET_DATA_PHYS) - lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ - li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE) - mfcr t2, t0 - or t2, t2, t1 - mtcr t2, t0 - - /* - * The new hardware thread starts at the next instruction - * For all the cases other than core 0 thread 0, we will - * jump to the secondary wait function. - */ - mfc0 v0, CP0_EBASE, 1 - andi v0, 0x3ff /* v0 <- node/core */ - - /* Init MMU in the first thread after changing THREAD_MODE - * register (Ax Errata?) - */ - andi v1, v0, 0x3 /* v1 <- thread id */ - bnez v1, 2f - nop - - li t0, MMU_SETUP - li t1, 0 - mtcr t1, t0 - _ehb - -2: beqz v0, 4f /* boot cpu (cpuid == 0)? 
*/ - nop - - /* setup status reg */ - move t1, zero -#ifdef CONFIG_64BIT - ori t1, ST0_KX -#endif - mtc0 t1, CP0_STATUS - /* mark CPU ready */ - PTR_LA t1, nlm_cpu_ready - sll v1, v0, 2 - PTR_ADDU t1, v1 - li t2, 1 - sw t2, 0(t1) - /* Wait until NMI hits */ -3: wait - j 3b - nop - - /* - * For the boot CPU, we have to restore registers and - * return - */ -4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */ - li t1, 0xfadebeef - dmtc0 t1, $4, 2 /* restore SP from UserLocal */ - PTR_SUBU sp, t0, PT_SIZE - RESTORE_ALL - jr ra - nop -EXPORT(nlm_reset_entry_end) + .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */ - xlp_config_lsu dmtc0 sp, $4, 2 /* SP saved in UserLocal */ SAVE_ALL sync @@ -294,8 +109,9 @@ NESTED(nlm_rmiboot_preboot, 16, sp) andi t2, t0, 0x3 /* thread num */ sll t0, 2 /* offset in cpu array */ - PTR_LA t1, nlm_cpu_ready /* mark CPU ready */ - PTR_ADDU t1, t0 + li t3, CKSEG1ADDR(RESET_DATA_PHYS) + ADDIU t1, t3, BOOT_CPU_READY + ADDU t1, t0 li t3, 1 sw t3, 0(t1) @@ -321,7 +137,7 @@ NESTED(nlm_rmiboot_preboot, 16, sp) mtcr t1, t0 /* update core control */ 1: wait - j 1b + b 1b nop END(nlm_rmiboot_preboot) __FINIT diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile index a84d6ed3746c..85ac4a892ced 100644 --- a/arch/mips/netlogic/xlp/Makefile +++ b/arch/mips/netlogic/xlp/Makefile @@ -1,3 +1,3 @@ -obj-y += setup.o nlm_hal.o +obj-y += setup.o nlm_hal.o cop2-ex.o dt.o obj-$(CONFIG_SMP) += wakeup.o obj-$(CONFIG_USB) += usb-init.o diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c new file mode 100644 index 000000000000..52bc5de42005 --- /dev/null +++ b/arch/mips/netlogic/xlp/cop2-ex.c @@ -0,0 +1,118 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Broadcom Corporation. 
+ * + * based on arch/mips/cavium-octeon/cpu.c + * Copyright (C) 2009 Wind River Systems, + * written by Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/init.h> +#include <linux/irqflags.h> +#include <linux/notifier.h> +#include <linux/prefetch.h> +#include <linux/sched.h> + +#include <asm/cop2.h> +#include <asm/current.h> +#include <asm/mipsregs.h> +#include <asm/page.h> + +#include <asm/netlogic/mips-extns.h> + +/* + * 64 bit ops are done in inline assembly to support 32 bit + * compilation + */ +void nlm_cop2_save(struct nlm_cop2_state *r) +{ + asm volatile( + ".set push\n" + ".set noat\n" + "dmfc2 $1, $0, 0\n" + "sd $1, 0(%1)\n" + "dmfc2 $1, $0, 1\n" + "sd $1, 8(%1)\n" + "dmfc2 $1, $0, 2\n" + "sd $1, 16(%1)\n" + "dmfc2 $1, $0, 3\n" + "sd $1, 24(%1)\n" + "dmfc2 $1, $1, 0\n" + "sd $1, 0(%2)\n" + "dmfc2 $1, $1, 1\n" + "sd $1, 8(%2)\n" + "dmfc2 $1, $1, 2\n" + "sd $1, 16(%2)\n" + "dmfc2 $1, $1, 3\n" + "sd $1, 24(%2)\n" + ".set pop\n" + : "=m"(*r) + : "r"(r->tx), "r"(r->rx)); + + r->tx_msg_status = __read_32bit_c2_register($2, 0); + r->rx_msg_status = __read_32bit_c2_register($3, 0) & 0x0fffffff; +} + +void nlm_cop2_restore(struct nlm_cop2_state *r) +{ + u32 rstat; + + asm volatile( + ".set push\n" + ".set noat\n" + "ld $1, 0(%1)\n" + "dmtc2 $1, $0, 0\n" + "ld $1, 8(%1)\n" + "dmtc2 $1, $0, 1\n" + "ld $1, 16(%1)\n" + "dmtc2 $1, $0, 2\n" + "ld $1, 24(%1)\n" + "dmtc2 $1, $0, 3\n" + "ld $1, 0(%2)\n" + "dmtc2 $1, $1, 0\n" + "ld $1, 8(%2)\n" + "dmtc2 $1, $1, 1\n" + "ld $1, 16(%2)\n" + "dmtc2 $1, $1, 2\n" + "ld $1, 24(%2)\n" + "dmtc2 $1, $1, 3\n" + ".set pop\n" + : : "m"(*r), "r"(r->tx), "r"(r->rx)); + + __write_32bit_c2_register($2, 0, r->tx_msg_status); + rstat = __read_32bit_c2_register($3, 0) & 0xf0000000u; + __write_32bit_c2_register($3, 0, r->rx_msg_status | rstat); +} + +static int nlm_cu2_call(struct notifier_block *nfb, unsigned long action, + void *data) +{ + unsigned long flags; + unsigned int status; + + switch (action) { + case CU2_EXCEPTION: + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + break; + local_irq_save(flags); + KSTK_STATUS(current) |= ST0_CU2; + status = read_c0_status(); + write_c0_status(status | ST0_CU2); + nlm_cop2_restore(&(current->thread.cp2)); + write_c0_status(status & ~ST0_CU2); + local_irq_restore(flags); + pr_info("COP2 access enabled for pid %d (%s)\n", + current->pid, current->comm); + return NOTIFY_BAD; /* Don't call default notifier */ + } + + return NOTIFY_OK; /* Let default notifier send signals */ +} + +static int __init nlm_cu2_setup(void) +{ + return cu2_notifier(nlm_cu2_call, 0); +} +early_initcall(nlm_cu2_setup); diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c new file mode 100644 index 000000000000..a15cdbb8d0bd --- /dev/null +++ b/arch/mips/netlogic/xlp/dt.c @@ -0,0 +1,99 @@ +/* + * Copyright 2003-2013 Broadcom Corporation. + * All Rights Reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the Broadcom + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/bootmem.h> + +#include <linux/of_fdt.h> +#include <linux/of_platform.h> +#include <linux/of_device.h> + +extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[]; + +void __init *xlp_dt_init(void *fdtp) +{ + if (!fdtp) { + switch (current_cpu_data.processor_id & 0xff00) { +#ifdef CONFIG_DT_XLP_SVP + case PRID_IMP_NETLOGIC_XLP3XX: + fdtp = __dtb_xlp_svp_begin; + break; +#endif +#ifdef CONFIG_DT_XLP_EVP + case PRID_IMP_NETLOGIC_XLP8XX: + fdtp = __dtb_xlp_evp_begin; + break; +#endif + default: + /* Pick a built-in if any, and hope for the best */ + fdtp = __dtb_start; + break; + } + } + initial_boot_params = fdtp; + return fdtp; +} + +void __init device_tree_init(void) +{ + unsigned long base, size; + + if (!initial_boot_params) + return; + + base = virt_to_phys((void *)initial_boot_params); + size = be32_to_cpu(initial_boot_params->totalsize); + + /* Before we do anything, lets reserve the dt blob */ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ + free_bootmem(base, size); +} + +static struct of_device_id __initdata xlp_ids[] = { + { .compatible = "simple-bus", }, + {}, +}; + +int __init xlp8xx_ds_publish_devices(void) +{ + if (!of_have_populated_dt()) + return 0; + return of_platform_bus_probe(NULL, xlp_ids, NULL); +} + +device_initcall(xlp8xx_ds_publish_devices); diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index eaa99d28cb8e..7b638f7be491 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -33,19 +33,13 @@ */ #include <linux/kernel.h> -#include <linux/serial_8250.h> -#include <linux/pm.h> -#include <linux/bootmem.h> +#include <linux/of_fdt.h> #include <asm/idle.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/bootinfo.h> -#include <linux/of_fdt.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> - #include <asm/netlogic/haldefs.h> #include <asm/netlogic/common.h> @@ -57,7 +51,6 @@ uint64_t nlm_io_base; struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; cpumask_t nlm_cpumask = CPU_MASK_CPU0; unsigned int nlm_threads_per_core; -extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[]; static void nlm_linux_exit(void) { @@ -68,41 +61,28 @@ static void nlm_linux_exit(void) cpu_wait(); } -void __init plat_mem_setup(void) +static void nlm_fixup_mem(void) { - void *fdtp; + const int pref_backup = 512; + int i; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + if (boot_mem_map.map[i].type != 
BOOT_MEM_RAM) + continue; + boot_mem_map.map[i].size -= pref_backup; + } +} +void __init plat_mem_setup(void) +{ panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; - /* - * If no FDT pointer is passed in, use the built-in FDT. - * device_tree_init() does not handle CKSEG0 pointers in - * 64-bit, so convert pointer. - */ - fdtp = (void *)(long)fw_arg0; - if (!fdtp) { - switch (current_cpu_data.processor_id & 0xff00) { -#ifdef CONFIG_DT_XLP_SVP - case PRID_IMP_NETLOGIC_XLP3XX: - fdtp = __dtb_xlp_svp_begin; - break; -#endif -#ifdef CONFIG_DT_XLP_EVP - case PRID_IMP_NETLOGIC_XLP8XX: - fdtp = __dtb_xlp_evp_begin; - break; -#endif - default: - /* Pick a built-in if any, and hope for the best */ - fdtp = __dtb_start; - break; - } - } - fdtp = phys_to_virt(__pa(fdtp)); - early_init_devtree(fdtp); + /* memory and bootargs from DT */ + early_init_devtree(initial_boot_params); + nlm_fixup_mem(); } const char *get_system_type(void) @@ -131,9 +111,19 @@ void nlm_percpu_init(int hwcpuid) void __init prom_init(void) { + void *reset_vec; + nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); + nlm_init_boot_cpu(); xlp_mmu_init(); nlm_node_init(0); + xlp_dt_init((void *)(long)fw_arg0); + + /* Update reset entry point with CPU init code */ + reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS); + memset(reset_vec, 0, RESET_VEC_SIZE); + memcpy(reset_vec, (void *)nlm_reset_entry, + (nlm_reset_entry_end - nlm_reset_entry)); #ifdef CONFIG_SMP cpumask_setall(&nlm_cpumask); @@ -145,36 +135,3 @@ void __init prom_init(void) register_smp_ops(&nlm_smp_ops); #endif } - -void __init device_tree_init(void) -{ - unsigned long base, size; - - if (!initial_boot_params) - return; - - base = virt_to_phys((void *)initial_boot_params); - size = be32_to_cpu(initial_boot_params->totalsize); - - /* Before we do anything, lets reserve the dt blob */ - reserve_bootmem(base, size, BOOTMEM_DEFAULT); - - unflatten_device_tree(); - - /* free the space reserved for the dt blob */ - free_bootmem(base, size); -} - -static struct of_device_id __initdata xlp_ids[] = { - { .compatible = "simple-bus", }, - {}, -}; - -int __init xlp8xx_ds_publish_devices(void) -{ - if (!of_have_populated_dt()) - return 0; - return of_platform_bus_probe(NULL, xlp_ids, NULL); -} - -device_initcall(xlp8xx_ds_publish_devices); diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c index abb3e08cc052..0cce37cbffef 100644 --- a/arch/mips/netlogic/xlp/wakeup.c +++ b/arch/mips/netlogic/xlp/wakeup.c @@ -77,12 +77,28 @@ static int xlp_wakeup_core(uint64_t sysbase, int node, int core) return count != 0; } +static int wait_for_cpus(int cpu, int bootcpu) +{ + volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY); + int i, count, notready; + + count = 0x20000000; + do { + notready = nlm_threads_per_core; + for (i = 0; i < nlm_threads_per_core; i++) + if (cpu_ready[cpu + i] || cpu == bootcpu) + --notready; + } while (notready != 0 && --count > 0); + + return count != 0; +} + static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) { struct nlm_soc_info *nodep; uint64_t syspcibase; uint32_t syscoremask; - int core, n, cpu, count, val; + int core, n, cpu; for (n = 0; n < NLM_NR_NODES; n++) { syspcibase = nlm_get_sys_pcibase(n); @@ -122,11 +138,8 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) /* core is up */ nodep->coremask |= 1u << core; - /* spin until the first hw thread sets its ready */ - count = 0x20000000; - do { - val = 
*(volatile int *)&nlm_cpu_ready[cpu]; - } while (val == 0 && --count > 0); + /* spin until the hw threads sets their ready */ + wait_for_cpus(cpu, 0); } } } @@ -138,6 +151,7 @@ void xlp_wakeup_secondary_cpus() * first wakeup core 0 threads */ xlp_boot_core0_siblings(); + wait_for_cpus(0, 0); /* now get other cores out of reset */ xlp_enable_secondary_cores(&nlm_cpumask); diff --git a/arch/mips/netlogic/xlr/fmn.c b/arch/mips/netlogic/xlr/fmn.c index 4d74f03de506..d428e8471eec 100644 --- a/arch/mips/netlogic/xlr/fmn.c +++ b/arch/mips/netlogic/xlr/fmn.c @@ -74,13 +74,13 @@ static irqreturn_t fmn_message_handler(int irq, void *data) struct nlm_fmn_msg msg; uint32_t mflags, bkt_status; - mflags = nlm_cop2_enable(); + mflags = nlm_cop2_enable_irqsave(); /* Disable message ring interrupt */ nlm_fmn_setup_intr(irq, 0); while (1) { /* 8 bkts per core, [24:31] each bit represents one bucket * Bit is Zero if bucket is not empty */ - bkt_status = (nlm_read_c2_status() >> 24) & 0xff; + bkt_status = (nlm_read_c2_status0() >> 24) & 0xff; if (bkt_status == 0xff) break; for (bucket = 0; bucket < 8; bucket++) { @@ -97,16 +97,16 @@ static irqreturn_t fmn_message_handler(int irq, void *data) pr_warn("No msgring handler for stnid %d\n", src_stnid); else { - nlm_cop2_restore(mflags); + nlm_cop2_disable_irqrestore(mflags); hndlr->action(bucket, src_stnid, size, code, &msg, hndlr->arg); - mflags = nlm_cop2_enable(); + mflags = nlm_cop2_enable_irqsave(); } } }; /* Enable message ring intr, to any thread in core */ nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1); - nlm_cop2_restore(mflags); + nlm_cop2_disable_irqrestore(mflags); return IRQ_HANDLED; } @@ -128,7 +128,7 @@ void xlr_percpu_fmn_init(void) bucket_sizes = xlr_board_fmn_config.bucket_size; cpu_fmn_info = &xlr_board_fmn_config.cpu[id]; - flags = nlm_cop2_enable(); + flags = nlm_cop2_enable_irqsave(); /* Setup bucket sizes for the core. 
*/ nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]); @@ -166,7 +166,7 @@ void xlr_percpu_fmn_init(void) /* enable FMN interrupts on this CPU */ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1); - nlm_cop2_restore(flags); + nlm_cop2_disable_irqrestore(flags); } @@ -198,7 +198,7 @@ void nlm_setup_fmn_irq(void) /* setup irq only once */ setup_irq(IRQ_FMN, &fmn_irqaction); - flags = nlm_cop2_enable(); + flags = nlm_cop2_enable_irqsave(); nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1); - nlm_cop2_restore(flags); + nlm_cop2_disable_irqrestore(flags); } diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 89c8c1066632..214d123b79fa 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -196,6 +196,7 @@ void __init prom_init(void) { int *argv, *envp; /* passed as 32 bit ptrs */ struct psb_info *prom_infop; + void *reset_vec; #ifdef CONFIG_SMP int i; #endif @@ -208,6 +209,12 @@ void __init prom_init(void) nlm_prom_info = *prom_infop; nlm_init_node(); + /* Update reset entry point with CPU init code */ + reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS); + memset(reset_vec, 0, RESET_VEC_SIZE); + memcpy(reset_vec, (void *)nlm_reset_entry, + (nlm_reset_entry_end - nlm_reset_entry)); + nlm_early_serial_setup(); build_arcs_cmdline(argv); prom_add_memory(); diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c index 3ebf7411d67b..c06e4c9f0478 100644 --- a/arch/mips/netlogic/xlr/wakeup.c +++ b/arch/mips/netlogic/xlr/wakeup.c @@ -53,6 +53,7 @@ int __cpuinit xlr_wakeup_secondary_cpus(void) { struct nlm_soc_info *nodep; unsigned int i, j, boot_cpu; + volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY); /* * In case of RMI boot, hit with NMI to get the cores @@ -71,7 +72,7 @@ int __cpuinit xlr_wakeup_secondary_cpus(void) nodep->coremask = 1; for (i = 1; i < NLM_CORES_PER_NODE; i++) { for (j = 1000000; j > 0; j--) { - if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE]) + if (cpu_ready[i * NLM_THREADS_PER_CORE]) break; udelay(10); } diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 2cb1d315d225..c382042911dd 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_LASAT) += pci-lasat.o obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o -obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o +obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o @@ -52,12 +52,11 @@ obj-$(CONFIG_TOSHIBA_RBTX4927) += fixup-rbtx4927.o obj-$(CONFIG_TOSHIBA_RBTX4938) += fixup-rbtx4938.o obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o -obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += pci-octeon.o pcie-octeon.o obj-$(CONFIG_CPU_XLR) += pci-xlr.o obj-$(CONFIG_CPU_XLP) += pci-xlp.o ifdef CONFIG_PCI_MSI -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += msi-octeon.o endif diff --git a/arch/mips/pci/fixup-wrppmc.c b/arch/mips/pci/fixup-wrppmc.c deleted file mode 100644 index 29737edd121f..000000000000 --- a/arch/mips/pci/fixup-wrppmc.c +++ 
/dev/null @@ -1,37 +0,0 @@ -/* - * fixup-wrppmc.c: PPMC board specific PCI fixup - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2006, Wind River Inc. Rongkai.zhan (rongkai.zhan@windriver.com) - */ -#include <linux/init.h> -#include <linux/pci.h> -#include <asm/gt64120.h> - -/* PCI interrupt pins */ -#define PCI_INTA 1 -#define PCI_INTB 2 -#define PCI_INTC 3 -#define PCI_INTD 4 - -#define PCI_SLOT_MAXNR 32 /* Each PCI bus has 32 physical slots */ - -static char pci_irq_tab[PCI_SLOT_MAXNR][5] __initdata = { - /* 0 INTA INTB INTC INTD */ - [0] = {0, 0, 0, 0, 0}, /* Slot 0: GT64120 PCI bridge */ - [6] = {0, WRPPMC_PCI_INTA_IRQ, 0, 0, 0}, -}; - -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - return pci_irq_tab[slot][pin]; -} - -/* Do platform specific device initialization at pci_enable_device() time */ -int pcibios_plat_dev_init(struct pci_dev *dev) -{ - return 0; -} diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c index 2eb954239bc5..151d9b5870bb 100644 --- a/arch/mips/pci/pci-bcm63xx.c +++ b/arch/mips/pci/pci-bcm63xx.c @@ -266,7 +266,7 @@ static int __init bcm63xx_register_pci(void) /* setup PCI to local bus access, used by PCI device to target * local RAM while bus mastering */ bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3); - if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) val = MPI_SP0_REMAP_ENABLE_MASK; else val = 0; @@ -338,6 +338,7 @@ static int __init bcm63xx_pci_init(void) case BCM6328_CPU_ID: case BCM6362_CPU_ID: return bcm63xx_register_pcie(); + case BCM3368_CPU_ID: case BCM6348_CPU_ID: case BCM6358_CPU_ID: case BCM6368_CPU_ID: diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index 6eb65e44d9e4..7b2ac81e1f59 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c @@ -217,6 +217,7 @@ static void pci_fixup_ioc3(struct pci_dev *d) pci_disable_swapping(d); } +#ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); @@ -224,6 +225,7 @@ int pcibus_to_node(struct pci_bus *bus) return bc->nasid; } EXPORT_SYMBOL(pcibus_to_node); +#endif /* CONFIG_NUMA */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, pci_fixup_ioc3); diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 879077b01155..cb1ef9984069 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -89,7 +89,7 @@ static inline u32 ltq_calc_bar11mask(void) u32 mem, bar11mask; /* BAR11MASK value depends on available memory on system. 
*/ - mem = num_physpages * PAGE_SIZE; + mem = get_num_physpages() * PAGE_SIZE; bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; return bar11mask; diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/pci/pci-malta.c index 37134ddfeaa5..37134ddfeaa5 100644 --- a/arch/mips/mti-malta/malta-pci.c +++ b/arch/mips/pci/pci-malta.c diff --git a/arch/mips/pmcs-msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile index cefba7733b73..9201c8b3858d 100644 --- a/arch/mips/pmcs-msp71xx/Makefile +++ b/arch/mips/pmcs-msp71xx/Makefile @@ -3,7 +3,6 @@ # obj-y += msp_prom.o msp_setup.o msp_irq.o \ msp_time.o msp_serial.o msp_elb.o -obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o msp_irq_per.o diff --git a/arch/mips/pmcs-msp71xx/gpio.c b/arch/mips/pmcs-msp71xx/gpio.c deleted file mode 100644 index aaccbe524386..000000000000 --- a/arch/mips/pmcs-msp71xx/gpio.c +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Generic PMC MSP71xx GPIO handling. These base gpio are controlled by two - * types of registers. The data register sets the output level when in output - * mode and when in input mode will contain the value at the input. The config - * register sets the various modes for each gpio. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * @author Patrick Glass <patrickglass@gmail.com> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/gpio.h> -#include <linux/spinlock.h> -#include <linux/io.h> - -#define MSP71XX_CFG_OFFSET(gpio) (4 * (gpio)) -#define CONF_MASK 0x0F -#define MSP71XX_GPIO_INPUT 0x01 -#define MSP71XX_GPIO_OUTPUT 0x08 - -#define MSP71XX_GPIO_BASE 0x0B8400000L - -#define to_msp71xx_gpio_chip(c) container_of(c, struct msp71xx_gpio_chip, chip) - -static spinlock_t gpio_lock; - -/* - * struct msp71xx_gpio_chip - container for gpio chip and registers - * @chip: chip structure for the specified gpio bank - * @data_reg: register for reading and writing the gpio pin value - * @config_reg: register to set the mode for the gpio pin bank - * @out_drive_reg: register to set the output drive mode for the gpio pin bank - */ -struct msp71xx_gpio_chip { - struct gpio_chip chip; - void __iomem *data_reg; - void __iomem *config_reg; - void __iomem *out_drive_reg; -}; - -/* - * msp71xx_gpio_get() - return the chip's gpio value - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be returned - * - * It will return 0 if gpio value is low and other if high. - */ -static int msp71xx_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip); - - return __raw_readl(msp_chip->data_reg) & (1 << offset); -} - -/* - * msp71xx_gpio_set() - set the output value for the gpio - * @chip: chip structure who controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This will set the gpio bit specified to the desired value. It will set the - * gpio pin low if value is 0 otherwise it will be high. 
- */ -static void msp71xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip); - unsigned long flags; - u32 data; - - spin_lock_irqsave(&gpio_lock, flags); - - data = __raw_readl(msp_chip->data_reg); - if (value) - data |= (1 << offset); - else - data &= ~(1 << offset); - __raw_writel(data, msp_chip->data_reg); - - spin_unlock_irqrestore(&gpio_lock, flags); -} - -/* - * msp71xx_set_gpio_mode() - declare the mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be assigned - * @mode: desired configuration for the gpio (see datasheet) - * - * It will set the gpio pin config to the @mode value passed in. - */ -static int msp71xx_set_gpio_mode(struct gpio_chip *chip, - unsigned offset, int mode) -{ - struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip); - const unsigned bit_offset = MSP71XX_CFG_OFFSET(offset); - unsigned long flags; - u32 cfg; - - spin_lock_irqsave(&gpio_lock, flags); - - cfg = __raw_readl(msp_chip->config_reg); - cfg &= ~(CONF_MASK << bit_offset); - cfg |= (mode << bit_offset); - __raw_writel(cfg, msp_chip->config_reg); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} - -/* - * msp71xx_direction_output() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This call will set the mode for the @gpio to output. It will set the - * gpio pin low if value is 0 otherwise it will be high. - */ -static int msp71xx_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - msp71xx_gpio_set(chip, offset, value); - - return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_OUTPUT); -} - -/* - * msp71xx_direction_input() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose to which the value will be assigned - * - * This call will set the mode for the @gpio to input. - */ -static int msp71xx_direction_input(struct gpio_chip *chip, unsigned offset) -{ - return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_INPUT); -} - -/* - * msp71xx_set_output_drive() - declare the output drive for the gpio line - * @gpio: gpio pin whose output drive you wish to modify - * @value: zero for active drain 1 for open drain drive - * - * This call will set the output drive mode for the @gpio to output. 
- */ -int msp71xx_set_output_drive(unsigned gpio, int value) -{ - unsigned long flags; - u32 data; - - if (gpio > 15 || gpio < 0) - return -EINVAL; - - spin_lock_irqsave(&gpio_lock, flags); - - data = __raw_readl((void __iomem *)(MSP71XX_GPIO_BASE + 0x190)); - if (value) - data |= (1 << gpio); - else - data &= ~(1 << gpio); - __raw_writel(data, (void __iomem *)(MSP71XX_GPIO_BASE + 0x190)); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} -EXPORT_SYMBOL(msp71xx_set_output_drive); - -#define MSP71XX_GPIO_BANK(name, dr, cr, base_gpio, num_gpio) \ -{ \ - .chip = { \ - .label = name, \ - .direction_input = msp71xx_direction_input, \ - .direction_output = msp71xx_direction_output, \ - .get = msp71xx_gpio_get, \ - .set = msp71xx_gpio_set, \ - .base = base_gpio, \ - .ngpio = num_gpio \ - }, \ - .data_reg = (void __iomem *)(MSP71XX_GPIO_BASE + dr), \ - .config_reg = (void __iomem *)(MSP71XX_GPIO_BASE + cr), \ - .out_drive_reg = (void __iomem *)(MSP71XX_GPIO_BASE + 0x190), \ -} - -/* - * struct msp71xx_gpio_banks[] - container array of gpio banks - * @chip: chip structure for the specified gpio bank - * @data_reg: register for reading and writing the gpio pin value - * @config_reg: register to set the mode for the gpio pin bank - * - * This array structure defines the gpio banks for the PMC MIPS Processor. - * We specify the bank name, the data register, the config register, base - * starting gpio number, and the number of gpios exposed by the bank. - */ -static struct msp71xx_gpio_chip msp71xx_gpio_banks[] = { - - MSP71XX_GPIO_BANK("GPIO_1_0", 0x170, 0x180, 0, 2), - MSP71XX_GPIO_BANK("GPIO_5_2", 0x174, 0x184, 2, 4), - MSP71XX_GPIO_BANK("GPIO_9_6", 0x178, 0x188, 6, 4), - MSP71XX_GPIO_BANK("GPIO_15_10", 0x17C, 0x18C, 10, 6), -}; - -void __init msp71xx_init_gpio(void) -{ - int i; - - spin_lock_init(&gpio_lock); - - for (i = 0; i < ARRAY_SIZE(msp71xx_gpio_banks); i++) - gpiochip_add(&msp71xx_gpio_banks[i].chip); -} diff --git a/arch/mips/pmcs-msp71xx/gpio_extended.c b/arch/mips/pmcs-msp71xx/gpio_extended.c deleted file mode 100644 index 2a99f360fae4..000000000000 --- a/arch/mips/pmcs-msp71xx/gpio_extended.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpio is - * a set of hardware registers that have no need for explicit locking as - * it is handled by unique method of writing individual set/clr bits. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * @author Patrick Glass <patrickglass@gmail.com> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/gpio.h> -#include <linux/io.h> - -#define MSP71XX_DATA_OFFSET(gpio) (2 * (gpio)) -#define MSP71XX_READ_OFFSET(gpio) (MSP71XX_DATA_OFFSET(gpio) + 1) -#define MSP71XX_CFG_OUT_OFFSET(gpio) (MSP71XX_DATA_OFFSET(gpio) + 16) -#define MSP71XX_CFG_IN_OFFSET(gpio) (MSP71XX_CFG_OUT_OFFSET(gpio) + 1) - -#define MSP71XX_EXD_GPIO_BASE 0x0BC000000L - -#define to_msp71xx_exd_gpio_chip(c) \ - container_of(c, struct msp71xx_exd_gpio_chip, chip) - -/* - * struct msp71xx_exd_gpio_chip - container for gpio chip and registers - * @chip: chip structure for the specified gpio bank - * @reg: register for control and data of gpio pin - */ -struct msp71xx_exd_gpio_chip { - struct gpio_chip chip; - void __iomem *reg; -}; - -/* - * msp71xx_exd_gpio_get() - return the chip's gpio value - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be returned - * - * It will return 0 if gpio value is low and other if high. - */ -static int msp71xx_exd_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - const unsigned bit = MSP71XX_READ_OFFSET(offset); - - return __raw_readl(msp71xx_chip->reg) & (1 << bit); -} - -/* - * msp71xx_exd_gpio_set() - set the output value for the gpio - * @chip: chip structure who controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This will set the gpio bit specified to the desired value. It will set the - * gpio pin low if value is 0 otherwise it will be high. - */ -static void msp71xx_exd_gpio_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - const unsigned bit = MSP71XX_DATA_OFFSET(offset); - - __raw_writel(1 << (bit + (value ? 1 : 0)), msp71xx_chip->reg); -} - -/* - * msp71xx_exd_direction_output() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This call will set the mode for the @gpio to output. It will set the - * gpio pin low if value is 0 otherwise it will be high. - */ -static int msp71xx_exd_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - - msp71xx_exd_gpio_set(chip, offset, value); - __raw_writel(1 << MSP71XX_CFG_OUT_OFFSET(offset), msp71xx_chip->reg); - return 0; -} - -/* - * msp71xx_exd_direction_input() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose to which the value will be assigned - * - * This call will set the mode for the @gpio to input. 
- */ -static int msp71xx_exd_direction_input(struct gpio_chip *chip, unsigned offset) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - - __raw_writel(1 << MSP71XX_CFG_IN_OFFSET(offset), msp71xx_chip->reg); - return 0; -} - -#define MSP71XX_EXD_GPIO_BANK(name, exd_reg, base_gpio, num_gpio) \ -{ \ - .chip = { \ - .label = name, \ - .direction_input = msp71xx_exd_direction_input, \ - .direction_output = msp71xx_exd_direction_output, \ - .get = msp71xx_exd_gpio_get, \ - .set = msp71xx_exd_gpio_set, \ - .base = base_gpio, \ - .ngpio = num_gpio, \ - }, \ - .reg = (void __iomem *)(MSP71XX_EXD_GPIO_BASE + exd_reg), \ -} - -/* - * struct msp71xx_exd_gpio_banks[] - container array of gpio banks - * @chip: chip structure for the specified gpio bank - * @reg: register for reading and writing the gpio pin value - * - * This array structure defines the extended gpio banks for the - * PMC MIPS Processor. We specify the bank name, the data/config - * register,the base starting gpio number, and the number of - * gpios exposed by the bank of gpios. - */ -static struct msp71xx_exd_gpio_chip msp71xx_exd_gpio_banks[] = { - - MSP71XX_EXD_GPIO_BANK("GPIO_23_16", 0x188, 16, 8), - MSP71XX_EXD_GPIO_BANK("GPIO_27_24", 0x18C, 24, 4), -}; - -void __init msp71xx_init_gpio_extended(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(msp71xx_exd_gpio_banks); i++) - gpiochip_add(&msp71xx_exd_gpio_banks[i].chip); -} diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c index d38b095fd0d0..9f64c2387808 100644 --- a/arch/mips/powertv/asic/asic_devices.c +++ b/arch/mips/powertv/asic/asic_devices.c @@ -529,17 +529,8 @@ EXPORT_SYMBOL(asic_resource_get); */ void platform_release_memory(void *ptr, int size) { - unsigned long addr; - unsigned long end; - - addr = ((unsigned long)ptr + (PAGE_SIZE - 1)) & PAGE_MASK; - end = ((unsigned long)ptr + size) & PAGE_MASK; - - for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(__va(addr))); - init_page_count(virt_to_page(__va(addr))); - free_page((unsigned long)__va(addr)); - } + free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size), + -1, NULL); } EXPORT_SYMBOL(platform_release_memory); diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 6b5f3406f414..f25ea5b45051 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -104,7 +104,7 @@ static int __init plat_of_setup(void) if (!of_have_populated_dt()) panic("device tree not present"); - strncpy(of_ids[0].compatible, soc_info.compatible, len); + strlcpy(of_ids[0].compatible, soc_info.compatible, len); strncpy(of_ids[1].compatible, "palmbus", len); if (of_platform_populate(NULL, of_ids, NULL, NULL)) diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile index 1f29e761d691..da8f6816d346 100644 --- a/arch/mips/sgi-ip27/Makefile +++ b/arch/mips/sgi-ip27/Makefile @@ -7,4 +7,5 @@ obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o ip27-klnuma.o \ ip27-xtalk.o obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o +obj-$(CONFIG_PCI) += ip27-irq-pci.o obj-$(CONFIG_SMP) += ip27-smp.o diff --git a/arch/mips/sgi-ip27/ip27-irq-pci.c b/arch/mips/sgi-ip27/ip27-irq-pci.c new file mode 100644 index 000000000000..ec22ec5600f3 --- /dev/null +++ b/arch/mips/sgi-ip27/ip27-irq-pci.c @@ -0,0 +1,266 @@ +/* + * ip27-irq.c: Highlevel interrupt handling for IP27 architecture. + * + * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 
+ * Copyright (C) 1999 - 2001 Kanoj Sarcar + */ + +#undef DEBUG + +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/timex.h> +#include <linux/smp.h> +#include <linux/random.h> +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/delay.h> +#include <linux/bitops.h> + +#include <asm/bootinfo.h> +#include <asm/io.h> +#include <asm/mipsregs.h> + +#include <asm/processor.h> +#include <asm/pci/bridge.h> +#include <asm/sn/addrs.h> +#include <asm/sn/agent.h> +#include <asm/sn/arch.h> +#include <asm/sn/hub.h> +#include <asm/sn/intr.h> + +/* + * Linux has a controller-independent x86 interrupt architecture. + * every controller has a 'controller-template', that is used + * by the main code to do the right thing. Each driver-visible + * interrupt source is transparently wired to the appropriate + * controller. Thus drivers need not be aware of the + * interrupt-controller. + * + * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, + * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. + * (IO-APICs assumed to be messaging to Pentium local-APICs) + * + * the code is designed to be easily extended with new/different + * interrupt controllers, without having to do assembly magic. + */ + +extern struct bridge_controller *irq_to_bridge[]; +extern int irq_to_slot[]; + +/* + * use these macros to get the encoded nasid and widget id + * from the irq value + */ +#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)] +#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i] + +static inline int alloc_level(int cpu, int irq) +{ + struct hub_data *hub = hub_data(cpu_to_node(cpu)); + struct slice_data *si = cpu_data[cpu].data; + int level; + + level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE); + if (level >= LEVELS_PER_SLICE) + panic("Cpu %d flooded with devices", cpu); + + __set_bit(level, hub->irq_alloc_mask); + si->level_to_irq[level] = irq; + + return level; +} + +static inline int find_level(cpuid_t *cpunum, int irq) +{ + int cpu, i; + + for_each_online_cpu(cpu) { + struct slice_data *si = cpu_data[cpu].data; + + for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) + if (si->level_to_irq[i] == irq) { + *cpunum = cpu; + + return i; + } + } + + panic("Could not identify cpu/level for irq %d", irq); +} + +static int intr_connect_level(int cpu, int bit) +{ + nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + struct slice_data *si = cpu_data[cpu].data; + + set_bit(bit, si->irq_enable_mask); + + if (!cputoslice(cpu)) { + REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); + } else { + REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); + } + + return 0; +} + +static int intr_disconnect_level(int cpu, int bit) +{ + nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + struct slice_data *si = cpu_data[cpu].data; + + clear_bit(bit, si->irq_enable_mask); + + if (!cputoslice(cpu)) { + REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); + } else { + REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); + } + + return 0; +} + +/* Startup one of the (PCI ...) IRQs routes over a bridge. 
*/ +static unsigned int startup_bridge_irq(struct irq_data *d) +{ + struct bridge_controller *bc; + bridgereg_t device; + bridge_t *bridge; + int pin, swlevel; + cpuid_t cpu; + + pin = SLOT_FROM_PCI_IRQ(d->irq); + bc = IRQ_TO_BRIDGE(d->irq); + bridge = bc->base; + + pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin); + /* + * "map" irq to a swlevel greater than 6 since the first 6 bits + * of INT_PEND0 are taken + */ + swlevel = find_level(&cpu, d->irq); + bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); + bridge->b_int_enable |= (1 << pin); + bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ + + /* + * Enable sending of an interrupt clear packt to the hub on a high to + * low transition of the interrupt pin. + * + * IRIX sets additional bits in the address which are documented as + * reserved in the bridge docs. + */ + bridge->b_int_mode |= (1UL << pin); + + /* + * We assume the bridge to have a 1:1 mapping between devices + * (slots) and intr pins. + */ + device = bridge->b_int_device; + device &= ~(7 << (pin*3)); + device |= (pin << (pin*3)); + bridge->b_int_device = device; + + bridge->b_wid_tflush; + + intr_connect_level(cpu, swlevel); + + return 0; /* Never anything pending. */ +} + +/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */ +static void shutdown_bridge_irq(struct irq_data *d) +{ + struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq); + bridge_t *bridge = bc->base; + int pin, swlevel; + cpuid_t cpu; + + pr_debug("bridge_shutdown: irq 0x%x\n", d->irq); + pin = SLOT_FROM_PCI_IRQ(d->irq); + + /* + * map irq to a swlevel greater than 6 since the first 6 bits + * of INT_PEND0 are taken + */ + swlevel = find_level(&cpu, d->irq); + intr_disconnect_level(cpu, swlevel); + + bridge->b_int_enable &= ~(1 << pin); + bridge->b_wid_tflush; +} + +static inline void enable_bridge_irq(struct irq_data *d) +{ + cpuid_t cpu; + int swlevel; + + swlevel = find_level(&cpu, d->irq); /* Criminal offence */ + intr_connect_level(cpu, swlevel); +} + +static inline void disable_bridge_irq(struct irq_data *d) +{ + cpuid_t cpu; + int swlevel; + + swlevel = find_level(&cpu, d->irq); /* Criminal offence */ + intr_disconnect_level(cpu, swlevel); +} + +static struct irq_chip bridge_irq_type = { + .name = "bridge", + .irq_startup = startup_bridge_irq, + .irq_shutdown = shutdown_bridge_irq, + .irq_mask = disable_bridge_irq, + .irq_unmask = enable_bridge_irq, +}; + +void register_bridge_irq(unsigned int irq) +{ + irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); +} + +int request_bridge_irq(struct bridge_controller *bc) +{ + int irq = allocate_irqno(); + int swlevel, cpu; + nasid_t nasid; + + if (irq < 0) + return irq; + + /* + * "map" irq to a swlevel greater than 6 since the first 6 bits + * of INT_PEND0 are taken + */ + cpu = bc->irq_cpu; + swlevel = alloc_level(cpu, irq); + if (unlikely(swlevel < 0)) { + free_irqno(irq); + + return -EAGAIN; + } + + /* Make sure it's not already pending when we connect it. 
*/ + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + REMOTE_HUB_CLR_INTR(nasid, swlevel); + + intr_connect_level(cpu, swlevel); + + register_bridge_irq(irq); + + return irq; +} diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 2315cfeb2687..3fbaef97a1b8 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -29,7 +29,6 @@ #include <asm/mipsregs.h> #include <asm/processor.h> -#include <asm/pci/bridge.h> #include <asm/sn/addrs.h> #include <asm/sn/agent.h> #include <asm/sn/arch.h> @@ -54,50 +53,6 @@ extern asmlinkage void ip27_irq(void); -extern struct bridge_controller *irq_to_bridge[]; -extern int irq_to_slot[]; - -/* - * use these macros to get the encoded nasid and widget id - * from the irq value - */ -#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)] -#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i] - -static inline int alloc_level(int cpu, int irq) -{ - struct hub_data *hub = hub_data(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - int level; - - level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE); - if (level >= LEVELS_PER_SLICE) - panic("Cpu %d flooded with devices", cpu); - - __set_bit(level, hub->irq_alloc_mask); - si->level_to_irq[level] = irq; - - return level; -} - -static inline int find_level(cpuid_t *cpunum, int irq) -{ - int cpu, i; - - for_each_online_cpu(cpu) { - struct slice_data *si = cpu_data[cpu].data; - - for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) - if (si->level_to_irq[i] == irq) { - *cpunum = cpu; - - return i; - } - } - - panic("Could not identify cpu/level for irq %d", irq); -} - /* * Find first bit set */ @@ -204,175 +159,6 @@ static void ip27_hub_error(void) panic("CPU %d got a hub error interrupt", smp_processor_id()); } -static int intr_connect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - set_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -static int intr_disconnect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - clear_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -/* Startup one of the (PCI ...) IRQs routes over a bridge. 
*/ -static unsigned int startup_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc; - bridgereg_t device; - bridge_t *bridge; - int pin, swlevel; - cpuid_t cpu; - - pin = SLOT_FROM_PCI_IRQ(d->irq); - bc = IRQ_TO_BRIDGE(d->irq); - bridge = bc->base; - - pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin); - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); - bridge->b_int_enable |= (1 << pin); - bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ - - /* - * Enable sending of an interrupt clear packt to the hub on a high to - * low transition of the interrupt pin. - * - * IRIX sets additional bits in the address which are documented as - * reserved in the bridge docs. - */ - bridge->b_int_mode |= (1UL << pin); - - /* - * We assume the bridge to have a 1:1 mapping between devices - * (slots) and intr pins. - */ - device = bridge->b_int_device; - device &= ~(7 << (pin*3)); - device |= (pin << (pin*3)); - bridge->b_int_device = device; - - bridge->b_wid_tflush; - - intr_connect_level(cpu, swlevel); - - return 0; /* Never anything pending. */ -} - -/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */ -static void shutdown_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq); - bridge_t *bridge = bc->base; - int pin, swlevel; - cpuid_t cpu; - - pr_debug("bridge_shutdown: irq 0x%x\n", d->irq); - pin = SLOT_FROM_PCI_IRQ(d->irq); - - /* - * map irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - intr_disconnect_level(cpu, swlevel); - - bridge->b_int_enable &= ~(1 << pin); - bridge->b_wid_tflush; -} - -static inline void enable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_connect_level(cpu, swlevel); -} - -static inline void disable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_disconnect_level(cpu, swlevel); -} - -static struct irq_chip bridge_irq_type = { - .name = "bridge", - .irq_startup = startup_bridge_irq, - .irq_shutdown = shutdown_bridge_irq, - .irq_mask = disable_bridge_irq, - .irq_unmask = enable_bridge_irq, -}; - -void register_bridge_irq(unsigned int irq) -{ - irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); -} - -int request_bridge_irq(struct bridge_controller *bc) -{ - int irq = allocate_irqno(); - int swlevel, cpu; - nasid_t nasid; - - if (irq < 0) - return irq; - - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - cpu = bc->irq_cpu; - swlevel = alloc_level(cpu, irq); - if (unlikely(swlevel < 0)) { - free_irqno(irq); - - return -EAGAIN; - } - - /* Make sure it's not already pending when we connect it. 
*/ - nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - REMOTE_HUB_CLR_INTR(nasid, swlevel); - - intr_connect_level(cpu, swlevel); - - register_bridge_irq(irq); - - return irq; -} - asmlinkage void plat_irq_dispatch(void) { unsigned long pending = read_c0_cause() & read_c0_status(); diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 1230f56429d7..a95c00f5fb96 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -357,8 +357,6 @@ static void __init szmem(void) int slot; cnodeid_t node; - num_physpages = 0; - for_each_online_node(node) { nodebytes = 0; for (slot = 0; slot < MAX_MEM_SLOTS; slot++) { @@ -381,7 +379,6 @@ static void __init szmem(void) slot = MAX_MEM_SLOTS; continue; } - num_physpages += slot_psize; memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), PFN_PHYS(slot_psize), node); } @@ -480,32 +477,8 @@ void __init paging_init(void) void __init mem_init(void) { - unsigned long codesize, datasize, initsize, tmp; - unsigned node; - - high_memory = (void *) __va(num_physpages << PAGE_SHIFT); - - for_each_online_node(node) { - /* - * This will free up the bootmem, ie, slot 0 memory. - */ - totalram_pages += free_all_bootmem_node(NODE_DATA(node)); - } - + high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); + free_all_bootmem(); setup_zero_pages(); /* This comes from node 0 */ - - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - tmp = nr_free_pages(); - printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " - "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", - tmp << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - (num_physpages - tmp) << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10, - totalhigh_pages << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); } diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig index 01cc1a749c73..5fbd3605d24f 100644 --- a/arch/mips/sibyte/Kconfig +++ b/arch/mips/sibyte/Kconfig @@ -147,7 +147,8 @@ config SIBYTE_CFE_CONSOLE config SIBYTE_BUS_WATCHER bool "Support for Bus Watcher statistics" - depends on SIBYTE_SB1xxx_SOC + depends on SIBYTE_SB1xxx_SOC && \ + (SIBYTE_BCM112X || SIBYTE_SB1250) help Handle and keep statistics on the bus error interrupts (COR_ECC, BAD_ECC, IO_BUS). 
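The mem_init() rework visible in ip27-memory.c above repeats across the architectures touched later in this diff (mn10300, openrisc, parisc): the hand-rolled page accounting and per-arch "Memory: ..." printk are dropped in favour of generic helpers. Below is a minimal sketch of the shape these conversions converge on; the symbols used (set_max_mapnr, max_low_pfn, free_all_bootmem, mem_init_print_info) are the ones appearing in the hunks themselves, while the function body is illustrative and not a copy of any single architecture.

#include <linux/bootmem.h>
#include <linux/mm.h>

void __init mem_init(void)
{
	/* Record the highest directly mapped pfn and the high_memory bound. */
	set_max_mapnr(max_low_pfn);
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/*
	 * Hand the bootmem pages to the buddy allocator; with this series the
	 * core updates totalram_pages itself, so the old
	 * "totalram_pages += free_all_bootmem()" pattern goes away.
	 */
	free_all_bootmem();

	/* Generic "Memory: ...K/...K available (...)" banner. */
	mem_init_print_info(NULL);
}

This is why the reservedpages loops and num_physpages arithmetic are deleted wholesale in the arch init.c hunks that follow.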
diff --git a/arch/mips/sibyte/Platform b/arch/mips/sibyte/Platform index d03a07516f83..af117330ce14 100644 --- a/arch/mips/sibyte/Platform +++ b/arch/mips/sibyte/Platform @@ -13,7 +13,6 @@ cflags-$(CONFIG_SIBYTE_BCM112X) += \ -I$(srctree)/arch/mips/include/asm/mach-sibyte \ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL -platform-$(CONFIG_SIBYTE_SB1250) += sibyte/ cflags-$(CONFIG_SIBYTE_SB1250) += \ -I$(srctree)/arch/mips/include/asm/mach-sibyte \ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL @@ -31,7 +30,8 @@ cflags-$(CONFIG_SIBYTE_BCM1x80) += \ # Sibyte BCM91120C (CRhine) board # Sibyte BCM91125C (CRhone) board # Sibyte BCM91125E (Rhone) board -# Sibyte SWARM board +# Sibyte BCM91250A (SWARM) board +# Sibyte BCM91250C2 (LittleSur) board # Sibyte BCM91x80 (BigSur) board # load-$(CONFIG_SIBYTE_CARMEL) := 0xffffffff80100000 @@ -41,3 +41,4 @@ load-$(CONFIG_SIBYTE_RHONE) := 0xffffffff80100000 load-$(CONFIG_SIBYTE_SENTOSA) := 0xffffffff80100000 load-$(CONFIG_SIBYTE_SWARM) := 0xffffffff80100000 load-$(CONFIG_SIBYTE_BIGSUR) := 0xffffffff80100000 +load-$(CONFIG_SIBYTE_LITTLESUR) := 0xffffffff80100000 diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index 36aa700cc40c..b3d6bf23a662 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,3 +1,4 @@ obj-y := cfe.o +obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o diff --git a/arch/mips/sibyte/sb1250/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c index 8871e3345bff..5581844c9194 100644 --- a/arch/mips/sibyte/sb1250/bus_watcher.c +++ b/arch/mips/sibyte/common/bus_watcher.c @@ -37,6 +37,9 @@ #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) +#include <asm/sibyte/bcm1480_regs.h> +#endif struct bw_stats_struct { @@ -81,9 +84,15 @@ void check_bus_watcher(void) #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS /* Destructive read, clears register and interrupt */ status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS)); -#else +#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) /* Use non-destructive register */ status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG)); +#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) + /* Use non-destructive register */ + /* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */ + status = csr_in32(IOADDR(A_BCM1480_BUS_ERR_STATUS_DEBUG)); +#else +#error bus watcher being built for unknown Sibyte SOC! 
#endif if (!(status & 0x7fffffff)) { printk("Using last values reaped by bus watcher driver\n"); @@ -175,9 +184,6 @@ static irqreturn_t sibyte_bw_int(int irq, void *data) #ifdef CONFIG_SIBYTE_BW_TRACE int i; #endif -#ifndef CONFIG_PROC_FS - char bw_buf[1024]; -#endif #ifdef CONFIG_SIBYTE_BW_TRACE csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c index 2188b39a1251..059e28c8fd97 100644 --- a/arch/mips/sibyte/common/sb_tbprof.c +++ b/arch/mips/sibyte/common/sb_tbprof.c @@ -27,6 +27,7 @@ #include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/errno.h> diff --git a/arch/mips/sibyte/sb1250/Makefile b/arch/mips/sibyte/sb1250/Makefile index d3d969de407b..cdc4c56c3e29 100644 --- a/arch/mips/sibyte/sb1250/Makefile +++ b/arch/mips/sibyte/sb1250/Makefile @@ -1,4 +1,3 @@ obj-y := setup.o irq.o time.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c index cec4b8ca1438..12336c2a649c 100644 --- a/arch/mips/sni/pcimt.c +++ b/arch/mips/sni/pcimt.c @@ -185,6 +185,7 @@ static void __init sni_pcimt_resource_init(void) extern struct pci_ops sni_pcimt_ops; +#ifdef CONFIG_PCI static struct pci_controller sni_controller = { .pci_ops = &sni_pcimt_ops, .mem_resource = &sni_mem_resource, @@ -193,6 +194,7 @@ static struct pci_controller sni_controller = { .io_offset = 0x00000000UL, .io_map_base = SNI_PORT_BASE }; +#endif static void enable_pcimt_irq(struct irq_data *d) { diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c index 7cddd03d1fea..05bb51676e82 100644 --- a/arch/mips/sni/pcit.c +++ b/arch/mips/sni/pcit.c @@ -128,13 +128,6 @@ static struct resource pcit_io_resources[] = { } }; -static struct resource sni_mem_resource = { - .start = 0x18000000UL, - .end = 0x1fbfffffUL, - .name = "PCIT PCI MEM", - .flags = IORESOURCE_MEM -}; - static void __init sni_pcit_resource_init(void) { int i; @@ -147,6 +140,14 @@ static void __init sni_pcit_resource_init(void) extern struct pci_ops sni_pcit_ops; +#ifdef CONFIG_PCI +static struct resource sni_mem_resource = { + .start = 0x18000000UL, + .end = 0x1fbfffffUL, + .name = "PCIT PCI MEM", + .flags = IORESOURCE_MEM +}; + static struct pci_controller sni_pcit_controller = { .pci_ops = &sni_pcit_ops, .mem_resource = &sni_mem_resource, @@ -155,6 +156,7 @@ static struct pci_controller sni_pcit_controller = { .io_offset = 0x00000000UL, .io_map_base = SNI_PORT_BASE }; +#endif /* CONFIG_PCI */ static void enable_pcit_irq(struct irq_data *d) { diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 729a50991780..b7eccbd17bf7 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -331,7 +331,8 @@ static int tx4939_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) { __u64 bit = 0; if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0)) diff --git a/arch/mips/wrppmc/Makefile b/arch/mips/wrppmc/Makefile deleted file mode 100644 index 307cc6920ce6..000000000000 --- a/arch/mips/wrppmc/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# This file is subject to the terms and conditions of the GNU General Public -# License. 
See the file "COPYING" in the main directory of this archive -# for more details. -# -# Copyright 2006 Wind River System, Inc. -# Author: Rongkai.Zhan <rongkai.zhan@windriver.com> -# -# Makefile for the Wind River MIPS 4Kc PPMC Eval Board -# - -obj-y += irq.o pci.o reset.o serial.o setup.o time.o diff --git a/arch/mips/wrppmc/Platform b/arch/mips/wrppmc/Platform deleted file mode 100644 index dc78b25b95fe..000000000000 --- a/arch/mips/wrppmc/Platform +++ /dev/null @@ -1,7 +0,0 @@ -# -# Wind River PPMC Board (4KC + GT64120) -# -platform-$(CONFIG_WR_PPMC) += wrppmc/ -cflags-$(CONFIG_WR_PPMC) += \ - -I$(srctree)/arch/mips/include/asm/mach-wrppmc -load-$(CONFIG_WR_PPMC) += 0xffffffff80100000 diff --git a/arch/mips/wrppmc/irq.c b/arch/mips/wrppmc/irq.c deleted file mode 100644 index f237bf4d5c3a..000000000000 --- a/arch/mips/wrppmc/irq.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * irq.c: GT64120 Interrupt Controller - * - * Copyright (C) 2006, Wind River System Inc. - * Author: Rongkai.Zhan, <rongkai.zhan@windriver.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ -#include <linux/hardirq.h> -#include <linux/init.h> -#include <linux/irq.h> - -#include <asm/gt64120.h> -#include <asm/irq_cpu.h> -#include <asm/mipsregs.h> - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; - - if (pending & STATUSF_IP7) - do_IRQ(WRPPMC_MIPS_TIMER_IRQ); /* CPU Compare/Count internal timer */ - else if (pending & STATUSF_IP6) - do_IRQ(WRPPMC_UART16550_IRQ); /* UART 16550 port */ - else if (pending & STATUSF_IP3) - do_IRQ(WRPPMC_PCI_INTA_IRQ); /* PCI INT_A */ - else - spurious_interrupt(); -} - -/** - * Initialize GT64120 Interrupt Controller - */ -void gt64120_init_pic(void) -{ - /* clear CPU Interrupt Cause Registers */ - GT_WRITE(GT_INTRCAUSE_OFS, (0x1F << 21)); - GT_WRITE(GT_HINTRCAUSE_OFS, 0x00); - - /* Disable all interrupts from GT64120 bridge chip */ - GT_WRITE(GT_INTRMASK_OFS, 0x00); - GT_WRITE(GT_HINTRMASK_OFS, 0x00); - GT_WRITE(GT_PCI0_ICMASK_OFS, 0x00); - GT_WRITE(GT_PCI0_HICMASK_OFS, 0x00); -} - -void __init arch_init_irq(void) -{ - /* IRQ 0 - 7 are for MIPS common irq_cpu controller */ - mips_cpu_irq_init(); - - gt64120_init_pic(); -} diff --git a/arch/mips/wrppmc/pci.c b/arch/mips/wrppmc/pci.c deleted file mode 100644 index 8b8a0e1a40ca..000000000000 --- a/arch/mips/wrppmc/pci.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * pci.c: GT64120 PCI support. - * - * Copyright (C) 2006, Wind River System Inc. Rongkai.Zhan <rongkai.zhan@windriver.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/types.h> -#include <linux/pci.h> - -#include <asm/gt64120.h> - -extern struct pci_ops gt64xxx_pci0_ops; - -static struct resource pci0_io_resource = { - .name = "pci_0 io", - .start = GT_PCI_IO_BASE, - .end = GT_PCI_IO_BASE + GT_PCI_IO_SIZE - 1, - .flags = IORESOURCE_IO, -}; - -static struct resource pci0_mem_resource = { - .name = "pci_0 memory", - .start = GT_PCI_MEM_BASE, - .end = GT_PCI_MEM_BASE + GT_PCI_MEM_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static struct pci_controller hose_0 = { - .pci_ops = &gt64xxx_pci0_ops, - .io_resource = &pci0_io_resource, - .mem_resource = &pci0_mem_resource, -}; - -static int __init gt64120_pci_init(void) -{ - (void) GT_READ(GT_PCI0_CMD_OFS); /* Huh??? -- Ralf */ - (void) GT_READ(GT_PCI0_BARE_OFS); - - /* reset the whole PCI I/O space range */ - ioport_resource.start = GT_PCI_IO_BASE; - ioport_resource.end = GT_PCI_IO_BASE + GT_PCI_IO_SIZE - 1; - - register_pci_controller(&hose_0); - return 0; -} - -arch_initcall(gt64120_pci_init); diff --git a/arch/mips/wrppmc/reset.c b/arch/mips/wrppmc/reset.c deleted file mode 100644 index 80beb188ed47..000000000000 --- a/arch/mips/wrppmc/reset.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1997 Ralf Baechle - */ -#include <linux/irqflags.h> -#include <linux/kernel.h> - -#include <asm/cacheflush.h> -#include <asm/idle.h> -#include <asm/mipsregs.h> -#include <asm/processor.h> - -void wrppmc_machine_restart(char *command) -{ - /* - * Ouch, we're still alive ... This time we take the silver bullet ... - * ... and find that we leave the hardware in a state in which the - * kernel in the flush locks up somewhen during of after the PCI - * detection stuff. - */ - local_irq_disable(); - set_c0_status(ST0_BEV | ST0_ERL); - change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED); - flush_cache_all(); - write_c0_wired(0); - __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); -} - -void wrppmc_machine_halt(void) -{ - local_irq_disable(); - - printk(KERN_NOTICE "You can safely turn off the power\n"); - while (1) { - if (cpu_wait) - cpu_wait(); - } -} diff --git a/arch/mips/wrppmc/serial.c b/arch/mips/wrppmc/serial.c deleted file mode 100644 index 83f0f7d05187..000000000000 --- a/arch/mips/wrppmc/serial.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Registration of WRPPMC UART platform device. - * - * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/serial_8250.h> - -#include <asm/gt64120.h> - -static struct resource wrppmc_uart_resource[] __initdata = { - { - .start = WRPPMC_UART16550_BASE, - .end = WRPPMC_UART16550_BASE + 7, - .flags = IORESOURCE_MEM, - }, - { - .start = WRPPMC_UART16550_IRQ, - .end = WRPPMC_UART16550_IRQ, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct plat_serial8250_port wrppmc_serial8250_port[] = { - { - .irq = WRPPMC_UART16550_IRQ, - .uartclk = WRPPMC_UART16550_CLOCK, - .iotype = UPIO_MEM, - .flags = UPF_IOREMAP | UPF_SKIP_TEST, - .mapbase = WRPPMC_UART16550_BASE, - }, - {}, -}; - -static __init int wrppmc_uart_add(void) -{ - struct platform_device *pdev; - int retval; - - pdev = platform_device_alloc("serial8250", -1); - if (!pdev) - return -ENOMEM; - - pdev->id = PLAT8250_DEV_PLATFORM; - pdev->dev.platform_data = wrppmc_serial8250_port; - - retval = platform_device_add_resources(pdev, wrppmc_uart_resource, - ARRAY_SIZE(wrppmc_uart_resource)); - if (retval) - goto err_free_device; - - retval = platform_device_add(pdev); - if (retval) - goto err_free_device; - - return 0; - -err_free_device: - platform_device_put(pdev); - - return retval; -} -device_initcall(wrppmc_uart_add); diff --git a/arch/mips/wrppmc/setup.c b/arch/mips/wrppmc/setup.c deleted file mode 100644 index ca65c84031a7..000000000000 --- a/arch/mips/wrppmc/setup.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * setup.c: Setup pointers to hardware dependent routines. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996, 1997, 2004 by Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2006, Wind River System Inc. Rongkai.zhan <rongkai.zhan@windriver.com> - */ -#include <linux/init.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/pm.h> - -#include <asm/io.h> -#include <asm/bootinfo.h> -#include <asm/reboot.h> -#include <asm/time.h> -#include <asm/gt64120.h> - -unsigned long gt64120_base = KSEG1ADDR(0x14000000); - -#ifdef WRPPMC_EARLY_DEBUG - -static volatile unsigned char * wrppmc_led = \ - (volatile unsigned char *)KSEG1ADDR(WRPPMC_LED_BASE); - -/* - * PPMC LED control register: - * -) bit[0] controls DS1 LED (1 - OFF, 0 - ON) - * -) bit[1] controls DS2 LED (1 - OFF, 0 - ON) - * -) bit[2] controls DS4 LED (1 - OFF, 0 - ON) - */ -void wrppmc_led_on(int mask) -{ - unsigned char value = *wrppmc_led; - - value &= (0xF8 | mask); - *wrppmc_led = value; -} - -/* If mask = 0, turn off all LEDs */ -void wrppmc_led_off(int mask) -{ - unsigned char value = *wrppmc_led; - - value |= (0x7 & mask); - *wrppmc_led = value; -} - -/* - * We assume that bootloader has initialized UART16550 correctly - */ -void __init wrppmc_early_putc(char ch) -{ - static volatile unsigned char *wrppmc_uart = \ - (volatile unsigned char *)KSEG1ADDR(WRPPMC_UART16550_BASE); - unsigned char value; - - /* Wait until Transmit-Holding-Register is empty */ - while (1) { - value = *(wrppmc_uart + 5); - if (value & 0x20) - break; - } - - *wrppmc_uart = ch; -} - -void __init wrppmc_early_printk(const char *fmt, ...) 
-{ - static char pbuf[256] = {'\0', }; - char *ch = pbuf; - va_list args; - unsigned int i; - - memset(pbuf, 0, 256); - va_start(args, fmt); - i = vsprintf(pbuf, fmt, args); - va_end(args); - - /* Print the string */ - while (*ch != '\0') { - wrppmc_early_putc(*ch); - /* if print '\n', also print '\r' */ - if (*ch++ == '\n') - wrppmc_early_putc('\r'); - } -} -#endif /* WRPPMC_EARLY_DEBUG */ - -void __init prom_free_prom_memory(void) -{ -} - -void __init plat_mem_setup(void) -{ - extern void wrppmc_machine_restart(char *command); - extern void wrppmc_machine_halt(void); - - _machine_restart = wrppmc_machine_restart; - _machine_halt = wrppmc_machine_halt; - pm_power_off = wrppmc_machine_halt; - - /* This makes the operations of 'in/out[bwl]' to the - * physical address ( < KSEG0) can work via KSEG1 - */ - set_io_port_base(KSEG1); -} - -const char *get_system_type(void) -{ - return "Wind River PPMC (GT64120)"; -} - -/* - * Initializes basic routines and structures pointers, memory size (as - * given by the bios and saves the command line. - */ -void __init prom_init(void) -{ - add_memory_region(WRPPMC_SDRAM_SCS0_BASE, WRPPMC_SDRAM_SCS0_SIZE, BOOT_MEM_RAM); - add_memory_region(WRPPMC_BOOTROM_BASE, WRPPMC_BOOTROM_SIZE, BOOT_MEM_ROM_DATA); - - wrppmc_early_printk("prom_init: GT64120 SDRAM Bank 0: 0x%x - 0x%08lx\n", - WRPPMC_SDRAM_SCS0_BASE, (WRPPMC_SDRAM_SCS0_BASE + WRPPMC_SDRAM_SCS0_SIZE)); -} diff --git a/arch/mips/wrppmc/time.c b/arch/mips/wrppmc/time.c deleted file mode 100644 index 668dbd5f12c5..000000000000 --- a/arch/mips/wrppmc/time.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * time.c: MIPS CPU Count/Compare timer hookup - * - * Author: Mark.Zhan, <rongkai.zhan@windriver.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996, 1997, 2004 by Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2006, Wind River System Inc. - */ -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irq.h> - -#include <asm/gt64120.h> -#include <asm/time.h> - -#define WRPPMC_CPU_CLK_FREQ 40000000 /* 40MHZ */ - -/* - * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect - * - * NOTE: We disable all GT64120 timers, and use MIPS processor internal - * timer as the source of kernel clock tick. 
- */ -void __init plat_time_init(void) -{ - /* Disable GT64120 timers */ - GT_WRITE(GT_TC_CONTROL_OFS, 0x00); - GT_WRITE(GT_TC0_OFS, 0x00); - GT_WRITE(GT_TC1_OFS, 0x00); - GT_WRITE(GT_TC2_OFS, 0x00); - GT_WRITE(GT_TC3_OFS, 0x00); - - /* Use MIPS compare/count internal timer */ - mips_hpt_frequency = WRPPMC_CPU_CLK_FREQ; -} diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 428da175d073..70e4f663ebd2 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -13,6 +13,7 @@ config MN10300 select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION + select HAVE_DEBUG_STACKOVERFLOW config AM33_2 def_bool n diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug index bdbfd444a9ff..94efb3ed223f 100644 --- a/arch/mn10300/Kconfig.debug +++ b/arch/mn10300/Kconfig.debug @@ -2,10 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - config DEBUG_DECOMPRESS_KERNEL bool "Using serial port during decompressing kernel" depends on DEBUG_KERNEL diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index b4ce844c9391..e2a2b203eb00 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index d48a84fd7fae..8a2e6ded9a44 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -345,9 +345,10 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - printk(KERN_ALERT "VM: killing process %s\n", tsk->comm); - if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) - do_exit(SIGKILL); + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { + pagefault_out_of_memory(); + return; + } goto no_context; do_sigbus: diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index 5a8ace63a6b4..97a1ec0beeec 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -99,43 +99,21 @@ void __init paging_init(void) */ void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - int tmp; - BUG_ON(!mem_map); #define START_PFN (contig_page_data.bdata->node_min_pfn) #define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) - max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; + max_mapnr = MAX_LOW_PFN - START_PFN; high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE); /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); /* this will put all low memory onto the freelists */ - totalram_pages += free_all_bootmem(); - - reservedpages = 0; - for (tmp = 0; tmp < num_physpages; tmp++) - if (PageReserved(&mem_map[tmp])) - reservedpages++; - - codesize = (unsigned long) &_etext - (unsigned long) &_stext; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO - "Memory: %luk/%luk available" - " (%dk kernel code, %dk reserved, %dk data, %dk init," - " %ldk highmem)\n", - nr_free_pages() << (PAGE_SHIFT - 10), - max_mapnr << (PAGE_SHIFT - 10), - codesize >> 10, - reservedpages << (PAGE_SHIFT - 10), - datasize >> 10, - initsize >> 10, - totalhigh_pages << (PAGE_SHIFT - 10)); + free_all_bootmem(); + + mem_init_print_info(NULL); } /* @@ -152,6 +130,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - 
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 1072bfd18c50..99dbab1c59ac 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -22,6 +22,7 @@ config OPENRISC select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select MODULES_USE_ELF_RELA + select HAVE_DEBUG_STACKOVERFLOW config MMU def_bool y @@ -128,16 +129,6 @@ config CMDLINE menu "Debugging options" -config DEBUG_STACKOVERFLOW - bool "Check for kernel stack overflow" - default y - help - Make extra checks for space available on stack in some - critical functions. This will cause kernel to run a bit slower, - but will catch most of kernel stack overruns and exit gracefully. - - Say Y if you are unsure. - config JUMP_UPON_UNHANDLED_EXCEPTION bool "Try to die gracefully" default y diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index e2bfafce66c5..4a41f8493ab0 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -267,10 +267,10 @@ out_of_memory: __asm__ __volatile__("l.nop 1"); up_read(&mm->mmap_sem); - printk("VM: killing process %s\n", tsk->comm); - if (user_mode(regs)) - do_exit(SIGKILL); - goto no_context; + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; do_sigbus: up_read(&mm->mmap_sem); diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index b3cbc6703837..7f94652311d7 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -202,56 +202,20 @@ void __init paging_init(void) /* References to section boundaries */ -static int __init free_pages_init(void) -{ - int reservedpages, pfn; - - /* this will put all low memory onto the freelists */ - totalram_pages = free_all_bootmem(); - - reservedpages = 0; - for (pfn = 0; pfn < max_low_pfn; pfn++) { - /* - * Only count reserved RAM pages - */ - if (PageReserved(mem_map + pfn)) - reservedpages++; - } - - return reservedpages; -} - -static void __init set_max_mapnr_init(void) -{ - max_mapnr = num_physpages = max_low_pfn; -} - void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - BUG_ON(!mem_map); - set_max_mapnr_init(); - + max_mapnr = max_low_pfn; high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); /* clear the zero-page */ memset((void *)empty_zero_page, 0, PAGE_SIZE); - reservedpages = free_pages_init(); - - codesize = (unsigned long)&_etext - (unsigned long)&_stext; - datasize = (unsigned long)&_edata - (unsigned long)&_etext; - initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; + /* this will put all low memory onto the freelists */ + free_all_bootmem(); - printk(KERN_INFO - "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), - max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, - reservedpages << (PAGE_SHIFT - 10), datasize >> 10, - initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10)) - ); + mem_init_print_info(NULL); printk("mem_init_done ...........................................\n"); mem_init_done = 1; @@ -261,11 +225,11 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } diff --git 
a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 2a2aea5aae5b..aa399a5259b6 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -27,6 +27,7 @@ config PARISC select MODULES_USE_ELF_RELA select CLONE_BACKWARDS select TTY # Needed for pdc_cons.c + select HAVE_DEBUG_STACKOVERFLOW help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug index 08a332f6ee87..bc989e522a04 100644 --- a/arch/parisc/Kconfig.debug +++ b/arch/parisc/Kconfig.debug @@ -13,14 +13,3 @@ config DEBUG_RODATA If in doubt, say "N". endmenu - -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - default y - depends on DEBUG_KERNEL - ---help--- - Say Y here if you want to check the overflows of kernel, IRQ - and exception stacks. This option will cause messages of the - stacks in detail when free stack space drops below a certain - limit. - If in doubt, say "N". diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 96ec3982be8d..e02f665f804a 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -17,6 +17,8 @@ # Mike Shaver, Helge Deller and Martin K. Petersen # +KBUILD_IMAGE := vmlinuz + KBUILD_DEFCONFIG := default_defconfig NM = sh $(srctree)/arch/parisc/nm @@ -92,7 +94,7 @@ PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \ else echo $(obj)/palo.conf; \ fi) -palo: vmlinux +palo: vmlinuz @if test ! -x "$(PALO)"; then \ echo 'ERROR: Please install palo first (apt-get install palo)';\ echo 'or build it from source and install it somewhere in your $$PATH';\ @@ -107,10 +109,14 @@ palo: vmlinux fi $(PALO) -f $(PALOCONF) -# Shorthands for known targets not supported by parisc, use vmlinux as default -Image zImage bzImage: vmlinux +# Shorthands for known targets not supported by parisc, use vmlinux/vmlinuz as default +Image: vmlinux +zImage bzImage: vmlinuz + +vmlinuz: vmlinux + @gzip -cf -9 $< > $@ -install: vmlinux +install: vmlinuz sh $(src)/arch/parisc/install.sh \ $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)" @@ -119,6 +125,7 @@ MRPROPER_FILES += palo.conf define archhelp @echo '* vmlinux - Uncompressed kernel image (./vmlinux)' + @echo ' vmlinuz - Compressed kernel image (./vmlinuz)' @echo ' palo - Bootable image (./lifimage)' @echo ' install - Install kernel using' @echo ' (your) ~/bin/$(INSTALLKERNEL) or' diff --git a/arch/parisc/defpalo.conf b/arch/parisc/defpalo.conf index 4e1ae25b08d1..208ff3b41487 100644 --- a/arch/parisc/defpalo.conf +++ b/arch/parisc/defpalo.conf @@ -4,7 +4,7 @@ # Most people using 'make palo' want a bootable file, usable for # network or tape booting for example. --init-tape=lifimage ---recoverykernel=vmlinux +--recoverykernel=vmlinuz ########## Pick your ROOT here! ########## # You need at least one 'root='! @@ -12,10 +12,10 @@ # If you want a root ramdisk, use the next 2 lines # (Edit the ramdisk image name!!!!) --ramdisk=ram-disk-image-file ---commandline=0/vmlinux HOME=/ root=/dev/ram initrd=0/ramdisk +--commandline=0/vmlinuz HOME=/ root=/dev/ram initrd=0/ramdisk panic_timeout=60 panic=-1 # If you want NFS root, use the following command line (Edit the HOSTNAME!!!) -#--commandline=0/vmlinux HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp +#--commandline=0/vmlinuz HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp # If you have root on a disk partition, use this (Edit the partition name!!!) 
-#--commandline=0/vmlinux HOME=/ root=/dev/sda1 +#--commandline=0/vmlinuz HOME=/ root=/dev/sda1 diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index d306b75bc77f..e1509308899f 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -32,9 +32,12 @@ static inline void set_eiem(unsigned long val) cr; \ }) -#define mtsp(gr, cr) \ - __asm__ __volatile__("mtsp %0,%1" \ +#define mtsp(val, cr) \ + { if (__builtin_constant_p(val) && ((val) == 0)) \ + __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \ + else \ + __asm__ __volatile__("mtsp %0,%1" \ : /* no outputs */ \ - : "r" (gr), "i" (cr) : "memory") + : "r" (val), "i" (cr) : "memory"); } #endif /* __PARISC_SPECIAL_INSNS_H */ diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h index 5273da991e06..9d086a599fa0 100644 --- a/arch/parisc/include/asm/tlbflush.h +++ b/arch/parisc/include/asm/tlbflush.h @@ -63,13 +63,14 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { - unsigned long flags; + unsigned long flags, sid; /* For one page, it's not worth testing the split_tlb variable */ mb(); - mtsp(vma->vm_mm->context,1); + sid = vma->vm_mm->context; purge_tlb_start(flags); + mtsp(sid, 1); pdtlb(addr); pitlb(addr); purge_tlb_end(flags); diff --git a/arch/parisc/include/uapi/asm/fcntl.h b/arch/parisc/include/uapi/asm/fcntl.h index cc61c475f277..34a46cbc76ed 100644 --- a/arch/parisc/include/uapi/asm/fcntl.h +++ b/arch/parisc/include/uapi/asm/fcntl.h @@ -20,7 +20,7 @@ #define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ #define O_PATH 020000000 -#define O_TMPFILE 040000000 +#define __O_TMPFILE 040000000 #define F_GETLK64 8 #define F_SETLK64 9 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 70c512a386f7..71700e636a8e 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -73,6 +73,8 @@ #define SO_SELECT_ERR_QUEUE 0x4026 +#define SO_BUSY_POLL 0x4027 + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. 
*/ diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index e593fc8d58bc..4da682b466d0 100644 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -26,13 +26,13 @@ if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi # Default install -if [ -f $4/vmlinux ]; then - mv $4/vmlinux $4/vmlinux.old +if [ -f $4/vmlinuz ]; then + mv $4/vmlinuz $4/vmlinuz.old fi if [ -f $4/System.map ]; then mv $4/System.map $4/System.old fi -cat $2 > $4/vmlinux +cat $2 > $4/vmlinuz cp $3 $4/System.map diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 65fb4cbc3a0f..2e65aa54bd10 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -440,8 +440,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, else { unsigned long flags; - mtsp(sid, 1); purge_tlb_start(flags); + mtsp(sid, 1); if (split_tlb) { while (npages--) { pdtlb(start); diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index c8fb61ed32f4..8a96c8ab9fe6 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -371,10 +371,23 @@ show_cpuinfo (struct seq_file *m, void *v) seq_printf(m, "capabilities\t:"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32) - seq_printf(m, " os32"); + seq_puts(m, " os32"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64) - seq_printf(m, " os64"); - seq_printf(m, "\n"); + seq_puts(m, " os64"); + if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) + seq_puts(m, " iopdir_fdc"); + switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) { + case PDC_MODEL_NVA_SUPPORTED: + seq_puts(m, " nva_supported"); + break; + case PDC_MODEL_NVA_SLOW: + seq_puts(m, " nva_slow"); + break; + case PDC_MODEL_NVA_UNSUPPORTED: + seq_puts(m, " needs_equivalent_aliasing"); + break; + } + seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities); seq_printf(m, "model\t\t: %s\n" "model name\t: %s\n", diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index a49cc812df8a..ac4370b1ca40 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -2,6 +2,7 @@ * Optimized memory copy routines. * * Copyright (C) 2004 Randolph Chung <tausq@debian.org> + * Copyright (C) 2013 Helge Deller <deller@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr) #define prefetch_dst(addr) do { } while(0) #endif +#define PA_MEMCPY_OK 0 +#define PA_MEMCPY_LOAD_ERROR 1 +#define PA_MEMCPY_STORE_ERROR 2 + /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words * per loop. This code is derived from glibc. */ -static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len) +static inline unsigned long copy_dstaligned(unsigned long dst, + unsigned long src, unsigned long len) { /* gcc complains that a2 and a3 may be uninitialized, but actually * they cannot be. Initialize a2/a3 to shut gcc up. 
*/ register unsigned int a0, a1, a2 = 0, a3 = 0; int sh_1, sh_2; - struct exception_data *d; /* prefetch_src((const void *)src); */ @@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src goto do2; case 0: if (len == 0) - return 0; + return PA_MEMCPY_OK; /* a3 = ((unsigned int *) src)[0]; a0 = ((unsigned int *) src)[1]; */ ldw(s_space, 0, src, a3, cda_ldw_exc); @@ -256,42 +261,35 @@ do0: preserve_branch(handle_load_error); preserve_branch(handle_store_error); - return 0; + return PA_MEMCPY_OK; handle_load_error: __asm__ __volatile__ ("cda_ldw_exc:\n"); - d = &__get_cpu_var(exception_data); - DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", - o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); - return o_len * 4 - d->fault_addr + o_src; + return PA_MEMCPY_LOAD_ERROR; handle_store_error: __asm__ __volatile__ ("cda_stw_exc:\n"); - d = &__get_cpu_var(exception_data); - DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", - o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); - return o_len * 4 - d->fault_addr + o_dst; + return PA_MEMCPY_STORE_ERROR; } -/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ -static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) +/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. + * In case of an access fault the faulty address can be read from the per_cpu + * exception data struct. */ +static unsigned long pa_memcpy_internal(void *dstp, const void *srcp, + unsigned long len) { register unsigned long src, dst, t1, t2, t3; register unsigned char *pcs, *pcd; register unsigned int *pws, *pwd; register double *pds, *pdd; - unsigned long ret = 0; - unsigned long o_dst, o_src, o_len; - struct exception_data *d; + unsigned long ret; src = (unsigned long)srcp; dst = (unsigned long)dstp; pcs = (unsigned char *)srcp; pcd = (unsigned char *)dstp; - o_dst = dst; o_src = src; o_len = len; - /* prefetch_src((const void *)srcp); */ if (len < THRESHOLD) @@ -401,7 +399,7 @@ byte_copy: len--; } - return 0; + return PA_MEMCPY_OK; unaligned_copy: /* possibly we are aligned on a word, but not on a double... */ @@ -438,8 +436,7 @@ unaligned_copy: src = (unsigned long)pcs; } - ret = copy_dstaligned(dst, src, len / sizeof(unsigned int), - o_dst, o_src, o_len); + ret = copy_dstaligned(dst, src, len / sizeof(unsigned int)); if (ret) return ret; @@ -454,17 +451,41 @@ unaligned_copy: handle_load_error: __asm__ __volatile__ ("pmc_load_exc:\n"); - d = &__get_cpu_var(exception_data); - DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", - o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); - return o_len - d->fault_addr + o_src; + return PA_MEMCPY_LOAD_ERROR; handle_store_error: __asm__ __volatile__ ("pmc_store_exc:\n"); + return PA_MEMCPY_STORE_ERROR; +} + + +/* Returns 0 for success, otherwise, returns number of bytes not transferred. 
*/ +static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) +{ + unsigned long ret, fault_addr, reference; + struct exception_data *d; + + ret = pa_memcpy_internal(dstp, srcp, len); + if (likely(ret == PA_MEMCPY_OK)) + return 0; + + /* if a load or store fault occured we can get the faulty addr */ d = &__get_cpu_var(exception_data); - DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", - o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); - return o_len - d->fault_addr + o_dst; + fault_addr = d->fault_addr; + + /* error in load or store? */ + if (ret == PA_MEMCPY_LOAD_ERROR) + reference = (unsigned long) srcp; + else + reference = (unsigned long) dstp; + + DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n", + ret, len, fault_addr, reference); + + if (fault_addr >= reference) + return len - (fault_addr - reference); + else + return len; } #ifdef __KERNEL__ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 505b56c6b9b9..b0f96c0e6316 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -214,7 +214,6 @@ static void __init setup_bootmem(void) mem_limit_func(); /* check for "mem=" argument */ mem_max = 0; - num_physpages = 0; for (i = 0; i < npmem_ranges; i++) { unsigned long rsize; @@ -229,10 +228,8 @@ static void __init setup_bootmem(void) npmem_ranges = i + 1; mem_max = mem_limit; } - num_physpages += pmem_ranges[i].pages; break; } - num_physpages += pmem_ranges[i].pages; mem_max += rsize; } @@ -532,7 +529,7 @@ void free_initmem(void) * pages are no-longer executable */ flush_icache_range(init_begin, init_end); - num_physpages += free_initmem_default(0); + free_initmem_default(-1); /* set up a new led state on systems shipped LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); @@ -580,8 +577,6 @@ unsigned long pcxl_dma_start __read_mostly; void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - /* Do sanity checks on page table constants */ BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t)); BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t)); @@ -590,45 +585,8 @@ void __init mem_init(void) > BITS_PER_LONG); high_memory = __va((max_pfn << PAGE_SHIFT)); - -#ifndef CONFIG_DISCONTIGMEM - max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1; - totalram_pages += free_all_bootmem(); -#else - { - int i; - - for (i = 0; i < npmem_ranges; i++) - totalram_pages += free_all_bootmem_node(NODE_DATA(i)); - } -#endif - - codesize = (unsigned long)_etext - (unsigned long)_text; - datasize = (unsigned long)_edata - (unsigned long)_etext; - initsize = (unsigned long)__init_end - (unsigned long)__init_begin; - - reservedpages = 0; -{ - unsigned long pfn; -#ifdef CONFIG_DISCONTIGMEM - int i; - - for (i = 0; i < npmem_ranges; i++) { - for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) { - if (PageReserved(pfn_to_page(pfn))) - reservedpages++; - } - } -#else /* !CONFIG_DISCONTIGMEM */ - for (pfn = 0; pfn < max_pfn; pfn++) { - /* - * Only count reserved RAM pages - */ - if (PageReserved(pfn_to_page(pfn))) - reservedpages++; - } -#endif -} + set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); + free_all_bootmem(); #ifdef CONFIG_PA11 if (hppa_dma_ops == &pcxl_dma_ops) { @@ -643,15 +601,7 @@ void __init mem_init(void) parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); #endif - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - 
codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10 - ); - + mem_init_print_info(NULL); #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ printk("virtual kernel memory layout:\n" " vmalloc : 0x%p - 0x%p (%4ld MB)\n" @@ -1101,6 +1051,6 @@ void flush_tlb_all(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - num_physpages += free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 508e3fe934d2..3bf72cd2c8fc 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -138,6 +138,7 @@ config PPC select ARCH_USE_BUILTIN_BSWAP select OLD_SIGSUSPEND select OLD_SIGACTION if PPC32 + select HAVE_DEBUG_STACKOVERFLOW config EARLY_PRINTK bool @@ -298,7 +299,7 @@ config HUGETLB_PAGE_SIZE_VARIABLE config MATH_EMULATION bool "Math emulation" - depends on 4xx || 8xx || E200 || PPC_MPC832x || E500 + depends on 4xx || 8xx || PPC_MPC832x || BOOKE ---help--- Some PowerPC chips designed for embedded applications do not have a floating-point unit and therefore do not implement the @@ -307,6 +308,10 @@ config MATH_EMULATION unit, which will allow programs that use floating-point instructions to run. + This is also useful to emulate missing (optional) instructions + such as fsqrt on cores that do have an FPU but do not implement + them (such as Freescale BookE). + config PPC_TRANSACTIONAL_MEM bool "Transactional Memory support for POWERPC" depends on PPC_BOOK3S_64 @@ -315,17 +320,6 @@ config PPC_TRANSACTIONAL_MEM ---help--- Support user-mode Transactional Memory on POWERPC. -config 8XX_MINIMAL_FPEMU - bool "Minimal math emulation for 8xx" - depends on 8xx && !MATH_EMULATION - help - Older arch/ppc kernels still emulated a few floating point - instructions such as load and store, even when full math - emulation is disabled. Say "Y" here if you want to preserve - this behavior. - - It is recommended that you build a soft-float userspace instead. - config IOMMU_HELPER def_bool PPC64 @@ -674,7 +668,6 @@ config SBUS config FSL_SOC bool - select HAVE_CAN_FLEXCAN if NET && CAN config FSL_PCI bool diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 863d877e0b5f..21c9f304e96c 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -28,13 +28,6 @@ config PRINT_STACK_DEPTH too small and stack traces cause important information to scroll off the screen. -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit. - config HCALL_STATS bool "Hypervisor call instrumentation" depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS @@ -147,6 +140,13 @@ choice enable debugging for the wrong type of machine your kernel _will not boot_. +config PPC_EARLY_DEBUG_BOOTX + bool "BootX or OpenFirmware" + depends on BOOTX_TEXT + help + Select this to enable early debugging for a machine using BootX + or OpenFirmware. 
+ config PPC_EARLY_DEBUG_LPAR bool "LPAR HV Console" depends on PPC_PSERIES diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts index b801dd06e573..d2c8a872308e 100644 --- a/arch/powerpc/boot/dts/currituck.dts +++ b/arch/powerpc/boot/dts/currituck.dts @@ -103,6 +103,11 @@ interrupts = <34 2>; }; + FPGA0: fpga@50000000 { + compatible = "ibm,currituck-fpga"; + reg = <0x50000000 0x4>; + }; + IIC0: i2c@00000000 { compatible = "ibm,iic-currituck", "ibm,iic"; reg = <0x0 0x00000014>; diff --git a/arch/powerpc/boot/dts/fsl/interlaken-lac-portals.dtsi b/arch/powerpc/boot/dts/fsl/interlaken-lac-portals.dtsi new file mode 100644 index 000000000000..9cffccf4e07e --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/interlaken-lac-portals.dtsi @@ -0,0 +1,156 @@ +/* T4240 Interlaken LAC Portal device tree stub with 24 portals. + * + * Copyright 2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#address-cells = <0x1>; +#size-cells = <0x1>; +compatible = "fsl,interlaken-lac-portals"; + +lportal0: lac-portal@0 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x0 0x1000>; +}; + +lportal1: lac-portal@1000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x1000 0x1000>; +}; + +lportal2: lac-portal@2000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x2000 0x1000>; +}; + +lportal3: lac-portal@3000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x3000 0x1000>; +}; + +lportal4: lac-portal@4000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x4000 0x1000>; +}; + +lportal5: lac-portal@5000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x5000 0x1000>; +}; + +lportal6: lac-portal@6000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x6000 0x1000>; +}; + +lportal7: lac-portal@7000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x7000 0x1000>; +}; + +lportal8: lac-portal@8000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x8000 0x1000>; +}; + +lportal9: lac-portal@9000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x9000 0x1000>; +}; + +lportal10: lac-portal@A000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0xA000 0x1000>; +}; + +lportal11: lac-portal@B000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0xB000 0x1000>; +}; + +lportal12: lac-portal@C000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0xC000 0x1000>; +}; + +lportal13: lac-portal@D000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0xD000 0x1000>; +}; + +lportal14: lac-portal@E000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0xE000 0x1000>; +}; + +lportal15: lac-portal@F000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0xF000 0x1000>; +}; + +lportal16: lac-portal@10000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x10000 0x1000>; +}; + +lportal17: lac-portal@11000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x11000 0x1000>; +}; + +lportal18: lac-portal@1200 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x12000 0x1000>; +}; + +lportal19: lac-portal@13000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x13000 0x1000>; +}; + +lportal20: lac-portal@14000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x14000 0x1000>; +}; + +lportal21: lac-portal@15000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x15000 0x1000>; +}; + +lportal22: lac-portal@16000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x16000 0x1000>; +}; + +lportal23: lac-portal@17000 { + compatible = "fsl,interlaken-lac-portal-v1.0"; + reg = <0x17000 0x1000>; +}; diff --git a/arch/powerpc/boot/dts/fsl/interlaken-lac.dtsi b/arch/powerpc/boot/dts/fsl/interlaken-lac.dtsi new file mode 100644 index 000000000000..e8208720ac0e --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/interlaken-lac.dtsi @@ -0,0 +1,45 @@ +/* + * T4 Interlaken Look-aside Controller (LAC) device tree stub + * + * Copyright 2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +lac: lac@229000 { + compatible = "fsl,interlaken-lac"; + reg = <0x229000 0x1000>; + interrupts = <16 2 1 18>; +}; + +lac-hv@228000 { + compatible = "fsl,interlaken-lac-hv"; + reg = <0x228000 0x1000>; + fsl,non-hv-node = <&lac>; +}; diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 2a84fd7f631c..671a8f960afa 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig @@ -423,6 +423,8 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_BOOTX_TEXT=y +CONFIG_PPC_EARLY_DEBUG=y +CONFIG_PPC_EARLY_DEBUG_BOOTX=y CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y CONFIG_SECURITY=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 07b7f2af2dca..1ea22fc24ea8 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -284,6 +284,8 @@ CONFIG_DEBUG_MUTEXES=y CONFIG_LATENCYTOP=y CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_BOOTX_TEXT=y +CONFIG_PPC_EARLY_DEBUG=y +CONFIG_PPC_EARLY_DEBUG_BOOTX=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_ECB=m diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 02ac96b679b8..2a5afac29861 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -138,6 +138,8 @@ CONFIG_DEBUG_STACK_USAGE=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_BOOTX_TEXT=y +CONFIG_PPC_EARLY_DEBUG=y +CONFIG_PPC_EARLY_DEBUG_BOOTX=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index 0d0d981442fd..ee853a1b1b2c 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -1,7 +1,6 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y -CONFIG_SPARSE_IRQ=y +CONFIG_NO_HZ=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set @@ -9,6 +8,7 @@ CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set 
+CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set # CONFIG_PPC_CHRP is not set CONFIG_PPC_MPC512x=y @@ -16,9 +16,7 @@ CONFIG_MPC5121_ADS=y CONFIG_MPC512x_GENERIC=y CONFIG_PDM360NG=y # CONFIG_PPC_PMAC is not set -CONFIG_NO_HZ=y CONFIG_HZ_1000=y -# CONFIG_MIGRATION is not set # CONFIG_SECCOMP is not set # CONFIG_PCI is not set CONFIG_NET=y @@ -33,8 +31,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_CAN=y -CONFIG_CAN_RAW=y -CONFIG_CAN_BCM=y CONFIG_CAN_VCAN=y CONFIG_CAN_MSCAN=y CONFIG_CAN_DEBUG_DEVICES=y @@ -46,7 +42,6 @@ CONFIG_DEVTMPFS_MOUNT=y # CONFIG_FIRMWARE_IN_KERNEL is not set CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -60,7 +55,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=1 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_XIP=y -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y CONFIG_SCSI=y @@ -68,6 +62,7 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y CONFIG_NETDEVICES=y +CONFIG_FS_ENET=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_QSEMI_PHY=y @@ -83,10 +78,6 @@ CONFIG_STE10XP=y CONFIG_LSI_ET1011C_PHY=y CONFIG_FIXED_PHY=y CONFIG_MDIO_BITBANG=y -CONFIG_NET_ETHERNET=y -CONFIG_FS_ENET=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y @@ -106,14 +97,18 @@ CONFIG_GPIO_SYSFS=y CONFIG_GPIO_MPC8XXX=y # CONFIG_HWMON is not set CONFIG_MEDIA_SUPPORT=y -CONFIG_VIDEO_DEV=y CONFIG_VIDEO_ADV_DEBUG=y -# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set -CONFIG_VIDEO_SAA711X=y CONFIG_FB=y CONFIG_FB_FSL_DIU=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_FSL=y +# CONFIG_USB_EHCI_HCD_PPC_OF is not set +CONFIG_USB_STORAGE=y +CONFIG_USB_GADGET=y +CONFIG_USB_FSL_USB2=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_M41T80=y CONFIG_RTC_DRV_MPC5121=y @@ -129,9 +124,7 @@ CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_UBIFS_FS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 165e6b32baef..152fa05b15e4 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -131,6 +131,7 @@ CONFIG_DUMMY=y CONFIG_FS_ENET=y CONFIG_UCC_GETH=y CONFIG_GIANFAR=y +CONFIG_E1000E=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 29767a8dfea5..a73626b09051 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -350,6 +350,8 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_BOOTX_TEXT=y +CONFIG_PPC_EARLY_DEBUG=y +CONFIG_PPC_EARLY_DEBUG_BOOTX=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index aef3f71de5ad..c86fcb92358e 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -398,6 +398,8 @@ CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y CONFIG_BOOTX_TEXT=y +CONFIG_PPC_EARLY_DEBUG=y +CONFIG_PPC_EARLY_DEBUG_BOOTX=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m diff --git 
a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index be1cb6ea3a36..20ebfaf7234b 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1264,6 +1264,8 @@ CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_XMON=y CONFIG_BOOTX_TEXT=y +CONFIG_PPC_EARLY_DEBUG=y +CONFIG_PPC_EARLY_DEBUG_BOOTX=y CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y CONFIG_SECURITY=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index c4dfbaf8b192..bea8587c3af5 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -296,6 +296,7 @@ CONFIG_SQUASHFS=m CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y +CONFIG_PSTORE=y CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index a80e32b46c11..09a8743143f3 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/string.h> +#include <linux/time.h> struct pci_dev; struct pci_bus; @@ -52,6 +53,7 @@ struct device_node; #define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ #define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ +#define EEH_PE_PHB_DEAD (1 << 2) /* Dead PHB */ struct eeh_pe { int type; /* PE type: PHB/Bus/Device */ @@ -59,8 +61,10 @@ struct eeh_pe { int config_addr; /* Traditional PCI address */ int addr; /* PE configuration address */ struct pci_controller *phb; /* Associated PHB */ + struct pci_bus *bus; /* Top PCI bus for bus PE */ int check_count; /* Times of ignored error */ int freeze_count; /* Times of froze up */ + struct timeval tstamp; /* Time on first-time freeze */ int false_positives; /* Times of reported #ff's */ struct eeh_pe *parent; /* Parent PE */ struct list_head child_list; /* Link PE to the child list */ @@ -95,12 +99,12 @@ struct eeh_dev { static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev) { - return edev->dn; + return edev ? edev->dn : NULL; } static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) { - return edev->pdev; + return edev ? 
edev->pdev : NULL; } /* @@ -130,8 +134,9 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) struct eeh_ops { char *name; int (*init)(void); + int (*post_init)(void); void* (*of_probe)(struct device_node *dn, void *flag); - void* (*dev_probe)(struct pci_dev *dev, void *flag); + int (*dev_probe)(struct pci_dev *dev, void *flag); int (*set_option)(struct eeh_pe *pe, int option); int (*get_pe_addr)(struct eeh_pe *pe); int (*get_state)(struct eeh_pe *pe, int *state); @@ -141,11 +146,12 @@ struct eeh_ops { int (*configure_bridge)(struct eeh_pe *pe); int (*read_config)(struct device_node *dn, int where, int size, u32 *val); int (*write_config)(struct device_node *dn, int where, int size, u32 val); + int (*next_error)(struct eeh_pe **pe); }; extern struct eeh_ops *eeh_ops; extern int eeh_subsystem_enabled; -extern struct mutex eeh_mutex; +extern raw_spinlock_t confirm_error_lock; extern int eeh_probe_mode; #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ @@ -166,14 +172,14 @@ static inline int eeh_probe_mode_dev(void) return (eeh_probe_mode == EEH_PROBE_MODE_DEV); } -static inline void eeh_lock(void) +static inline void eeh_serialize_lock(unsigned long *flags) { - mutex_lock(&eeh_mutex); + raw_spin_lock_irqsave(&confirm_error_lock, *flags); } -static inline void eeh_unlock(void) +static inline void eeh_serialize_unlock(unsigned long flags) { - mutex_unlock(&eeh_mutex); + raw_spin_unlock_irqrestore(&confirm_error_lock, flags); } /* @@ -184,8 +190,11 @@ static inline void eeh_unlock(void) typedef void *(*eeh_traverse_func)(void *data, void *flag); int eeh_phb_pe_create(struct pci_controller *phb); +struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); +struct eeh_pe *eeh_pe_get(struct eeh_dev *edev); int eeh_add_to_parent_pe(struct eeh_dev *edev); int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe); +void eeh_pe_update_time_stamp(struct eeh_pe *pe); void *eeh_pe_dev_traverse(struct eeh_pe *root, eeh_traverse_func fn, void *flag); void eeh_pe_restore_bars(struct eeh_pe *pe); @@ -193,12 +202,13 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); void *eeh_dev_init(struct device_node *dn, void *data); void eeh_dev_phb_init_dynamic(struct pci_controller *phb); +int eeh_init(void); int __init eeh_ops_register(struct eeh_ops *ops); int __exit eeh_ops_unregister(const char *name); unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val); int eeh_dev_check_failure(struct eeh_dev *edev); -void __init eeh_addr_cache_build(void); +void eeh_addr_cache_build(void); void eeh_add_device_tree_early(struct device_node *); void eeh_add_device_tree_late(struct pci_bus *); void eeh_add_sysfs_files(struct pci_bus *); @@ -221,6 +231,11 @@ void eeh_remove_bus_device(struct pci_dev *, int); #else /* !CONFIG_EEH */ +static inline int eeh_init(void) +{ + return 0; +} + static inline void *eeh_dev_init(struct device_node *dn, void *data) { return NULL; @@ -245,9 +260,6 @@ static inline void eeh_add_sysfs_files(struct pci_bus *bus) { } static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { } -static inline void eeh_lock(void) { } -static inline void eeh_unlock(void) { } - #define EEH_POSSIBLE_ERROR(val, type) (0) #define EEH_IO_ERROR_VALUE(size) (-1UL) #endif /* CONFIG_EEH */ diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index de67d830151b..89d5670b2eeb 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h @@ -31,7 +31,9 @@ struct eeh_event { 
struct eeh_pe *pe; /* EEH PE */ }; +int eeh_event_init(void); int eeh_send_failure_event(struct eeh_pe *pe); +void eeh_remove_event(struct eeh_pe *pe); void eeh_handle_event(struct eeh_pe *pe); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 46793b58a761..07ca627e52c0 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -358,12 +358,12 @@ label##_relon_pSeries: \ /* No guest interrupts come through here */ \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ - EXC_STD, KVMTEST_PR, vec) + EXC_STD, NOTEST, vec) #define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \ .globl label##_relon_pSeries; \ label##_relon_pSeries: \ - EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \ + EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD) #define STD_RELON_EXCEPTION_HV(loc, vec, label) \ @@ -374,12 +374,12 @@ label##_relon_hv: \ /* No guest interrupts come through here */ \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ - EXC_HV, KVMTEST, vec) + EXC_HV, NOTEST, vec) #define STD_RELON_EXCEPTION_HV_OOL(vec, label) \ .globl label##_relon_hv; \ label##_relon_hv: \ - EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \ + EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV) /* This associate vector numbers with bits in paca->irq_happened */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index f2498c8e595d..d750336b171d 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -191,8 +191,14 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { } -#endif /* CONFIG_HUGETLB_PAGE */ +#define hugepd_shift(x) 0 +static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, + unsigned pdshift) +{ + return 0; +} +#endif /* CONFIG_HUGETLB_PAGE */ /* * FSL Book3E platforms require special gpage handling - the gpages diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h index 1a9d9aea21fa..088f95b2e14f 100644 --- a/arch/powerpc/include/asm/ibmebus.h +++ b/arch/powerpc/include/asm/ibmebus.h @@ -48,8 +48,8 @@ extern struct bus_type ibmebus_bus_type; -int ibmebus_register_driver(struct of_platform_driver *drv); -void ibmebus_unregister_driver(struct of_platform_driver *drv); +int ibmebus_register_driver(struct platform_driver *drv); +void ibmebus_unregister_driver(struct platform_driver *drv); int ibmebus_request_irq(u32 ist, irq_handler_t handler, unsigned long irq_flags, const char *devname, diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index cbfe678e3dbe..c34656a8925e 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -76,6 +76,9 @@ struct iommu_table { struct iommu_pool large_pool; struct iommu_pool pools[IOMMU_NR_POOLS]; unsigned long *it_map; /* A simple allocation bitmap for now */ +#ifdef CONFIG_IOMMU_API + struct iommu_group *it_group; +#endif }; struct scatterlist; @@ -98,6 +101,8 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name); */ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, int nid); +extern void iommu_register_group(struct iommu_table *tbl, + int pci_domain_number, unsigned long pe_num); extern int iommu_map_sg(struct 
device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, @@ -125,13 +130,6 @@ extern void iommu_init_early_pSeries(void); extern void iommu_init_early_dart(void); extern void iommu_init_early_pasemi(void); -#ifdef CONFIG_PCI -extern void pci_iommu_init(void); -extern void pci_direct_iommu_init(void); -#else -static inline void pci_iommu_init(void) { } -#endif - extern void alloc_dart_table(void); #if defined(CONFIG_PPC64) && defined(CONFIG_PM) static inline void iommu_save(void) @@ -147,5 +145,26 @@ static inline void iommu_restore(void) } #endif +/* The API to support IOMMU operations for VFIO */ +extern int iommu_tce_clear_param_check(struct iommu_table *tbl, + unsigned long ioba, unsigned long tce_value, + unsigned long npages); +extern int iommu_tce_put_param_check(struct iommu_table *tbl, + unsigned long ioba, unsigned long tce); +extern int iommu_tce_build(struct iommu_table *tbl, unsigned long entry, + unsigned long hwaddr, enum dma_data_direction direction); +extern unsigned long iommu_clear_tce(struct iommu_table *tbl, + unsigned long entry); +extern int iommu_clear_tces_and_put_pages(struct iommu_table *tbl, + unsigned long entry, unsigned long pages); +extern int iommu_put_tce_user_mode(struct iommu_table *tbl, + unsigned long entry, unsigned long tce); + +extern void iommu_flush_tce(struct iommu_table *tbl); +extern int iommu_take_ownership(struct iommu_table *tbl); +extern void iommu_release_ownership(struct iommu_table *tbl); + +extern enum dma_data_direction iommu_tce_direction(unsigned long tce); + #endif /* __KERNEL__ */ #endif /* _ASM_IOMMU_H */ diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 154ba3f6b7a9..86d638a3b359 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -159,36 +159,46 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type) } /* - * Lock and read a linux PTE. If it's present and writable, atomically - * set dirty and referenced bits and return the PTE, otherwise return 0. + * If it's present and writable, atomically set dirty and referenced bits and + * return the PTE, otherwise return 0. If we find a transparent hugepage + * and if it is marked splitting we return 0; */ -static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing) +static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing, + unsigned int hugepage) { - pte_t pte, tmp; - - /* wait until _PAGE_BUSY is clear then set it atomically */ - __asm__ __volatile__ ( - "1: ldarx %0,0,%3\n" - " andi. %1,%0,%4\n" - " bne- 1b\n" - " ori %1,%0,%4\n" - " stdcx. 
%1,0,%3\n" - " bne- 1b" - : "=&r" (pte), "=&r" (tmp), "=m" (*p) - : "r" (p), "i" (_PAGE_BUSY) - : "cc"); - - if (pte_present(pte)) { - pte = pte_mkyoung(pte); - if (writing && pte_write(pte)) - pte = pte_mkdirty(pte); - } + pte_t old_pte, new_pte = __pte(0); + + while (1) { + old_pte = pte_val(*ptep); + /* + * wait until _PAGE_BUSY is clear then set it atomically + */ + if (unlikely(old_pte & _PAGE_BUSY)) { + cpu_relax(); + continue; + } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* If hugepage and is trans splitting return None */ + if (unlikely(hugepage && + pmd_trans_splitting(pte_pmd(old_pte)))) + return __pte(0); +#endif + /* If pte is not present return None */ + if (unlikely(!(old_pte & _PAGE_PRESENT))) + return __pte(0); - *p = pte; /* clears _PAGE_BUSY */ + new_pte = pte_mkyoung(old_pte); + if (writing && pte_write(old_pte)) + new_pte = pte_mkdirty(new_pte); - return pte; + if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte, + new_pte)) + break; + } + return new_pte; } + /* Return HPTE cache control bits corresponding to Linux pte bits */ static inline unsigned long hpte_cache_bits(unsigned long pte_val) { diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index b1e7f2af1016..9b12f88d4adb 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -66,7 +66,8 @@ struct lppaca { u8 reserved6[48]; u8 cede_latency_hint; - u8 reserved7[7]; + u8 ebb_regs_in_use; + u8 reserved7[6]; u8 dtl_enable_mask; /* Dispatch Trace Log mask */ u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */ u8 fpregs_in_use; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 92386fc4e82a..8b480901165a 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -36,13 +36,13 @@ struct machdep_calls { #ifdef CONFIG_PPC64 void (*hpte_invalidate)(unsigned long slot, unsigned long vpn, - int psize, int ssize, - int local); + int bpsize, int apsize, + int ssize, int local); long (*hpte_updatepp)(unsigned long slot, unsigned long newpp, unsigned long vpn, - int psize, int ssize, - int local); + int bpsize, int apsize, + int ssize, int local); void (*hpte_updateboltedpp)(unsigned long newpp, unsigned long ea, int psize, int ssize); @@ -57,6 +57,9 @@ struct machdep_calls { void (*hpte_removebolted)(unsigned long ea, int psize, int ssize); void (*flush_hash_range)(unsigned long number, int local); + void (*hugepage_invalidate)(struct mm_struct *mm, + unsigned char *hpte_slot_array, + unsigned long addr, int psize); /* special for kexec, to be called in real mode, linear mapping is * destroyed as well */ diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 2accc9611248..c4cf01197273 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -340,6 +340,20 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap) int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, int local, int ssize, unsigned int shift, unsigned int mmu_psize); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int __hash_page_thp(unsigned long ea, unsigned long access, + unsigned long vsid, pmd_t *pmdp, unsigned long trap, + int local, int ssize, unsigned int psize); +#else +static inline int __hash_page_thp(unsigned long ea, unsigned long access, + unsigned long vsid, pmd_t *pmdp, + unsigned long trap, int local, + int ssize, unsigned int psize) +{ + 
BUG(); + return -1; +} +#endif extern void hash_failure_debug(unsigned long ea, unsigned long access, unsigned long vsid, unsigned long trap, int ssize, int psize, int lpsize, diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h index 885c040d6194..8ae133eaf9fa 100644 --- a/arch/powerpc/include/asm/mpc5121.h +++ b/arch/powerpc/include/asm/mpc5121.h @@ -68,6 +68,5 @@ struct mpc512x_lpc { }; int mpc512x_cs_config(unsigned int cs, u32 val); -int __init mpc5121_clk_init(void); #endif /* __ASM_POWERPC_MPC5121_H__ */ diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index c0f9ef90f0b8..4a1ac9fbf186 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -339,6 +339,8 @@ struct mpic #endif }; +extern struct bus_type mpic_subsys; + /* * MPIC flags (passed to mpic_alloc) * @@ -393,6 +395,9 @@ struct mpic #define MPIC_REGSET_STANDARD MPIC_REGSET(0) /* Original MPIC */ #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */ +/* Get the version of primary MPIC */ +extern u32 fsl_mpic_primary_get_version(void); + /* Allocate the controller structure and setup the linux irq descs * for the range if interrupts passed in. No HW initialization is * actually performed. diff --git a/arch/powerpc/include/asm/mpic_timer.h b/arch/powerpc/include/asm/mpic_timer.h new file mode 100644 index 000000000000..0e23cd4ac8aa --- /dev/null +++ b/arch/powerpc/include/asm/mpic_timer.h @@ -0,0 +1,46 @@ +/* + * arch/powerpc/include/asm/mpic_timer.h + * + * Header file for Mpic Global Timer + * + * Copyright 2013 Freescale Semiconductor, Inc. + * + * Author: Wang Dongsheng <Dongsheng.Wang@freescale.com> + * Li Yang <leoli@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __MPIC_TIMER__ +#define __MPIC_TIMER__ + +#include <linux/interrupt.h> +#include <linux/time.h> + +struct mpic_timer { + void *dev; + struct cascade_priv *cascade_handle; + unsigned int num; + unsigned int irq; +}; + +#ifdef CONFIG_MPIC_TIMER +struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev, + const struct timeval *time); +void mpic_start_timer(struct mpic_timer *handle); +void mpic_stop_timer(struct mpic_timer *handle); +void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time); +void mpic_free_timer(struct mpic_timer *handle); +#else +struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev, + const struct timeval *time) { return NULL; } +void mpic_start_timer(struct mpic_timer *handle) { } +void mpic_stop_timer(struct mpic_timer *handle) { } +void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time) { } +void mpic_free_timer(struct mpic_timer *handle) { } +#endif + +#endif diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index cbb9305ab15a..029fe85722aa 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -117,7 +117,13 @@ extern int opal_enter_rtas(struct rtas_args *args, #define OPAL_SET_SLOT_LED_STATUS 55 #define OPAL_GET_EPOW_STATUS 56 #define OPAL_SET_SYSTEM_ATTENTION_LED 57 +#define OPAL_RESERVED1 58 +#define OPAL_RESERVED2 59 +#define OPAL_PCI_NEXT_ERROR 60 +#define OPAL_PCI_EEH_FREEZE_STATUS2 61 +#define OPAL_PCI_POLL 62 #define OPAL_PCI_MSI_EOI 63 +#define OPAL_PCI_GET_PHB_DIAG_DATA2 64 #ifndef __ASSEMBLY__ @@ -125,6 +131,7 @@ extern int opal_enter_rtas(struct rtas_args *args, enum OpalVendorApiTokens { OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999 }; + enum OpalFreezeState { OPAL_EEH_STOPPED_NOT_FROZEN = 0, OPAL_EEH_STOPPED_MMIO_FREEZE = 1, @@ -134,55 +141,69 @@ enum OpalFreezeState { OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5, OPAL_EEH_STOPPED_PERM_UNAVAIL = 6 }; + enum OpalEehFreezeActionToken { OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1, OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3 }; + enum OpalPciStatusToken { - OPAL_EEH_PHB_NO_ERROR = 0, - OPAL_EEH_PHB_FATAL = 1, - OPAL_EEH_PHB_RECOVERABLE = 2, - OPAL_EEH_PHB_BUS_ERROR = 3, - OPAL_EEH_PCI_NO_DEVSEL = 4, - OPAL_EEH_PCI_TA = 5, - OPAL_EEH_PCIEX_UR = 6, - OPAL_EEH_PCIEX_CA = 7, - OPAL_EEH_PCI_MMIO_ERROR = 8, - OPAL_EEH_PCI_DMA_ERROR = 9 + OPAL_EEH_NO_ERROR = 0, + OPAL_EEH_IOC_ERROR = 1, + OPAL_EEH_PHB_ERROR = 2, + OPAL_EEH_PE_ERROR = 3, + OPAL_EEH_PE_MMIO_ERROR = 4, + OPAL_EEH_PE_DMA_ERROR = 5 }; + +enum OpalPciErrorSeverity { + OPAL_EEH_SEV_NO_ERROR = 0, + OPAL_EEH_SEV_IOC_DEAD = 1, + OPAL_EEH_SEV_PHB_DEAD = 2, + OPAL_EEH_SEV_PHB_FENCED = 3, + OPAL_EEH_SEV_PE_ER = 4, + OPAL_EEH_SEV_INF = 5 +}; + enum OpalShpcAction { OPAL_SHPC_GET_LINK_STATE = 0, OPAL_SHPC_GET_SLOT_STATE = 1 }; + enum OpalShpcLinkState { OPAL_SHPC_LINK_DOWN = 0, OPAL_SHPC_LINK_UP = 1 }; + enum OpalMmioWindowType { OPAL_M32_WINDOW_TYPE = 1, OPAL_M64_WINDOW_TYPE = 2, OPAL_IO_WINDOW_TYPE = 3 }; + enum OpalShpcSlotState { OPAL_SHPC_DEV_NOT_PRESENT = 0, OPAL_SHPC_DEV_PRESENT = 1 }; + enum OpalExceptionHandler { OPAL_MACHINE_CHECK_HANDLER = 1, OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2, OPAL_SOFTPATCH_HANDLER = 3 }; + enum OpalPendingState { - OPAL_EVENT_OPAL_INTERNAL = 0x1, - OPAL_EVENT_NVRAM = 0x2, - OPAL_EVENT_RTC = 0x4, - OPAL_EVENT_CONSOLE_OUTPUT = 0x8, - OPAL_EVENT_CONSOLE_INPUT = 0x10, - OPAL_EVENT_ERROR_LOG_AVAIL = 0x20, - OPAL_EVENT_ERROR_LOG = 0x40, - OPAL_EVENT_EPOW = 0x80, - 
OPAL_EVENT_LED_STATUS = 0x100 + OPAL_EVENT_OPAL_INTERNAL = 0x1, + OPAL_EVENT_NVRAM = 0x2, + OPAL_EVENT_RTC = 0x4, + OPAL_EVENT_CONSOLE_OUTPUT = 0x8, + OPAL_EVENT_CONSOLE_INPUT = 0x10, + OPAL_EVENT_ERROR_LOG_AVAIL = 0x20, + OPAL_EVENT_ERROR_LOG = 0x40, + OPAL_EVENT_EPOW = 0x80, + OPAL_EVENT_LED_STATUS = 0x100, + OPAL_EVENT_PCI_ERROR = 0x200 }; /* Machine check related definitions */ @@ -364,15 +385,80 @@ struct opal_machine_check_event { } u; }; +enum { + OPAL_P7IOC_DIAG_TYPE_NONE = 0, + OPAL_P7IOC_DIAG_TYPE_RGC = 1, + OPAL_P7IOC_DIAG_TYPE_BI = 2, + OPAL_P7IOC_DIAG_TYPE_CI = 3, + OPAL_P7IOC_DIAG_TYPE_MISC = 4, + OPAL_P7IOC_DIAG_TYPE_I2C = 5, + OPAL_P7IOC_DIAG_TYPE_LAST = 6 +}; + +struct OpalIoP7IOCErrorData { + uint16_t type; + + /* GEM */ + uint64_t gemXfir; + uint64_t gemRfir; + uint64_t gemRirqfir; + uint64_t gemMask; + uint64_t gemRwof; + + /* LEM */ + uint64_t lemFir; + uint64_t lemErrMask; + uint64_t lemAction0; + uint64_t lemAction1; + uint64_t lemWof; + + union { + struct OpalIoP7IOCRgcErrorData { + uint64_t rgcStatus; /* 3E1C10 */ + uint64_t rgcLdcp; /* 3E1C18 */ + }rgc; + struct OpalIoP7IOCBiErrorData { + uint64_t biLdcp0; /* 3C0100, 3C0118 */ + uint64_t biLdcp1; /* 3C0108, 3C0120 */ + uint64_t biLdcp2; /* 3C0110, 3C0128 */ + uint64_t biFenceStatus; /* 3C0130, 3C0130 */ + + uint8_t biDownbound; /* BI Downbound or Upbound */ + }bi; + struct OpalIoP7IOCCiErrorData { + uint64_t ciPortStatus; /* 3Dn008 */ + uint64_t ciPortLdcp; /* 3Dn010 */ + + uint8_t ciPort; /* Index of CI port: 0/1 */ + }ci; + }; +}; + /** * This structure defines the overlay which will be used to store PHB error * data upon request. */ enum { + OPAL_PHB_ERROR_DATA_VERSION_1 = 1, +}; + +enum { + OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1, +}; + +enum { OPAL_P7IOC_NUM_PEST_REGS = 128, }; +struct OpalIoPhbErrorCommon { + uint32_t version; + uint32_t ioType; + uint32_t len; +}; + struct OpalIoP7IOCPhbErrorData { + struct OpalIoPhbErrorCommon common; + uint32_t brdgCtl; // P7IOC utl regs @@ -530,14 +616,21 @@ int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number, uint64_t pci_mem_size); int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state); -int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, uint64_t diag_buffer_len); -int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, uint64_t diag_buffer_len); +int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, + uint64_t diag_buffer_len); +int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, + uint64_t diag_buffer_len); +int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer, + uint64_t diag_buffer_len); int64_t opal_pci_fence_phb(uint64_t phb_id); int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); int64_t opal_get_epow_status(uint64_t *status); int64_t opal_set_system_attention_led(uint8_t led_action); +int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, + uint16_t *pci_error_type, uint16_t *severity); +int64_t opal_pci_poll(uint64_t phb_id); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); @@ -551,6 +644,11 @@ extern void hvc_opal_init_early(void); extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int 
depth, void *data); +extern int opal_notifier_register(struct notifier_block *nb); +extern void opal_notifier_enable(void); +extern void opal_notifier_disable(void); +extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); + extern int opal_get_chars(uint32_t vtermno, char *buf, int count); extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index f265049dd7d6..2dd7bfc459be 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -60,6 +60,7 @@ struct power_pmu { #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ #define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ +#define PPMU_EBB 0x00000100 /* supports event based branch */ /* * Values for flags to get_alternatives() @@ -68,6 +69,11 @@ struct power_pmu { #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ +/* + * We use the event config bit 63 as a flag to request EBB. + */ +#define EVENT_CONFIG_EBB_SHIFT 63 + extern int register_power_pmu(struct power_pmu *); struct pt_regs; diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index b66ae722a8e9..f65e27b09bd3 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -221,17 +221,17 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE), + return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL|__GFP_REPEAT); } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd); + kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd); } #define __pmd_free_tlb(tlb, pmd, addr) \ - pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE) + pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX) #ifndef CONFIG_PPC_64K_PAGES #define __pud_free_tlb(tlb, pud, addr) \ pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h index 45142d640720..a56b82fb0609 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h @@ -33,7 +33,8 @@ #define PGDIR_MASK (~(PGDIR_SIZE-1)) /* Bits to mask out from a PMD to get to the PTE page */ -#define PMD_MASKED_BITS 0x1ff +/* PMDs point to PTE table fragments which are 4K aligned. 
*/ +#define PMD_MASKED_BITS 0xfff /* Bits to mask out from a PGD/PUD to get to the PMD page */ #define PUD_MASKED_BITS 0x1ff diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index e3d55f6f24fe..46db09414a10 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -10,6 +10,7 @@ #else #include <asm/pgtable-ppc64-4k.h> #endif +#include <asm/barrier.h> #define FIRST_USER_ADDRESS 0 @@ -20,7 +21,11 @@ PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) - +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1) +#else +#define PMD_CACHE_INDEX PMD_INDEX_SIZE +#endif /* * Define the address range of the kernel non-linear virtual area */ @@ -150,7 +155,7 @@ #define pmd_present(pmd) (pmd_val(pmd) != 0) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) -#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) +extern struct page *pmd_page(pmd_t pmd); #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) #define pud_none(pud) (!pud_val(pud)) @@ -339,43 +344,217 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); void pgtable_cache_init(void); +#endif /* __ASSEMBLY__ */ + +/* + * THP pages can't be special. So use the _PAGE_SPECIAL + */ +#define _PAGE_SPLITTING _PAGE_SPECIAL + +/* + * We need to differentiate between explicit huge page and THP huge + * page, since THP huge page also need to track real subpage details + */ +#define _PAGE_THP_HUGE _PAGE_4K_PFN /* - * find_linux_pte returns the address of a linux pte for a given - * effective address and directory. If not found, it returns zero. + * set of bits not changed in pmd_modify. */ -static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) +#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ + _PAGE_THP_HUGE) + +#ifndef __ASSEMBLY__ +/* + * The linux hugepage PMD now include the pmd entries followed by the address + * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits. + * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per + * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and + * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t. + * + * The last three bits are intentionally left to zero. This memory location + * are also used as normal page PTE pointers. 
So if we have any pointers + * left around while we collapse a hugepage, we need to make sure + * _PAGE_PRESENT and _PAGE_FILE bits of that are zero when we look at them + */ +static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) { - pgd_t *pg; - pud_t *pu; - pmd_t *pm; - pte_t *pt = NULL; - - pg = pgdir + pgd_index(ea); - if (!pgd_none(*pg)) { - pu = pud_offset(pg, ea); - if (!pud_none(*pu)) { - pm = pmd_offset(pu, ea); - if (pmd_present(*pm)) - pt = pte_offset_kernel(pm, ea); - } - } - return pt; + return (hpte_slot_array[index] >> 3) & 0x1; } -#ifdef CONFIG_HUGETLB_PAGE -pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - unsigned *shift); -#else -static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - unsigned *shift) +static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, + int index) { - if (shift) - *shift = 0; - return find_linux_pte(pgdir, ea); + return hpte_slot_array[index] >> 4; } -#endif /* !CONFIG_HUGETLB_PAGE */ -#endif /* __ASSEMBLY__ */ +static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, + unsigned int index, unsigned int hidx) +{ + hpte_slot_array[index] = hidx << 4 | 0x1 << 3; +} +static inline char *get_hpte_slot_array(pmd_t *pmdp) +{ + /* + * The hpte hindex is stored in the pgtable whose address is in the + * second half of the PMD + * + * Order this load with the test for pmd_trans_huge in the caller + */ + smp_rmb(); + return *(char **)(pmdp + PTRS_PER_PMD); + + +} + +extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); +extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); +extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); +extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd); +extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd); + +static inline int pmd_trans_huge(pmd_t pmd) +{ + /* + * leaf pte for huge page, bottom two bits != 00 + */ + return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); +} + +static inline int pmd_large(pmd_t pmd) +{ + /* + * leaf pte for huge page, bottom two bits != 00 + */ + if (pmd_trans_huge(pmd)) + return pmd_val(pmd) & _PAGE_PRESENT; + return 0; +} + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + if (pmd_trans_huge(pmd)) + return pmd_val(pmd) & _PAGE_SPLITTING; + return 0; +} + +extern int has_transparent_hugepage(void); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +static inline pte_t *pmdp_ptep(pmd_t *pmd) +{ + return (pte_t *)pmd; +} + +#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) +#define pmd_young(pmd) pte_young(pmd_pte(pmd)) +#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) +#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) +#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) +#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) pte_write(pmd_pte(pmd)) + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + /* Do nothing, mk_pmd() does this part. 
*/ + return pmd; +} + +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_PRESENT; + return pmd; +} + +static inline pmd_t pmd_mksplitting(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_SPLITTING; + return pmd; +} + +#define __HAVE_ARCH_PMD_SAME +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); +} + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +extern unsigned long pmd_hugepage_update(struct mm_struct *mm, + unsigned long addr, + pmd_t *pmdp, unsigned long clr); + +static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long old; + + if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + return 0; + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); + return ((old & _PAGE_ACCESSED) != 0); +} + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +extern pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_CLEAR_FLUSH +extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + + if ((pmd_val(*pmdp) & _PAGE_RW) == 0) + return; + + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); +} + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#define __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_INVALIDATE +extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index b6293d26bd39..7d6eacf249cf 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -217,6 +217,12 @@ extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr, extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr); +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define pmd_large(pmd) 0 +#define has_transparent_hugepage() 0 +#endif +pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, + unsigned *shift); #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h index 5f1e15b68704..3421637cfd7b 100644 --- a/arch/powerpc/include/asm/probes.h +++ b/arch/powerpc/include/asm/probes.h @@ -38,5 +38,30 @@ typedef u32 ppc_opcode_t; #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) #endif /* CONFIG_PPC64 */ +#ifdef CONFIG_PPC_ADV_DEBUG_REGS +#define MSR_SINGLESTEP (MSR_DE) +#else +#define MSR_SINGLESTEP (MSR_SE) +#endif 
+ +/* Enable single stepping for the current task */ +static inline void enable_single_step(struct pt_regs *regs) +{ + regs->msr |= MSR_SINGLESTEP; +#ifdef CONFIG_PPC_ADV_DEBUG_REGS + /* + * We turn off Critical Input Exception(CE) to ensure that the single + * step will be for the instruction we have the probe on; if we don't, + * it is possible we'd get the single step reported for CE. + */ + regs->msr &= ~MSR_CE; + mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); +#ifdef CONFIG_PPC_47x + isync(); +#endif +#endif +} + + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PROBES_H */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 14a658363698..47a35b08b963 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -168,10 +168,10 @@ struct thread_struct { * The following help to manage the use of Debug Control Registers * om the BookE platforms. */ - unsigned long dbcr0; - unsigned long dbcr1; + uint32_t dbcr0; + uint32_t dbcr1; #ifdef CONFIG_BOOKE - unsigned long dbcr2; + uint32_t dbcr2; #endif /* * The stored value of the DBSR register will be the value at the @@ -179,7 +179,7 @@ struct thread_struct { * user (will never be written to) and has value while helping to * describe the reason for the last debug trap. Torez */ - unsigned long dbsr; + uint32_t dbsr; /* * The following will contain addresses used by debug applications * to help trace and trap on particular address locations. @@ -200,7 +200,7 @@ struct thread_struct { #endif #endif /* FP and VSX 0-31 register set */ - double fpr[32][TS_FPRWIDTH]; + double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); struct { unsigned int pad; @@ -287,9 +287,9 @@ struct thread_struct { unsigned long siar; unsigned long sdar; unsigned long sier; - unsigned long mmcr0; unsigned long mmcr2; - unsigned long mmcra; + unsigned mmcr0; + unsigned used_ebb; #endif }; @@ -404,9 +404,7 @@ static inline void prefetchw(const void *x) #define spin_lock_prefetch(x) prefetchw(x) -#ifdef CONFIG_PPC64 #define HAVE_ARCH_PICK_MMAP_LAYOUT -#endif #ifdef CONFIG_PPC64 static inline unsigned long get_clean_sp(unsigned long sp, int is_32) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 4a9e408644fe..5d7d9c2a5473 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -621,11 +621,15 @@ #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ +#define MMCR0_EBE 0x00100000UL /* Event based branch enable */ +#define MMCR0_PMCC 0x000c0000UL /* PMC control */ +#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ #define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/ #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */ #define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */ +#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */ #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */ #define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ @@ -673,6 +677,11 @@ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 
0x0200000 /* SDAR contents valid */ +/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */ +#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO) +#define MMCR2_USER_MASK 0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */ +#define SIER_USER_MASK 0x7fffffUL + #define SPRN_PA6T_MMCR0 795 #define PA6T_MMCR0_EN0 0x0000000000000001UL #define PA6T_MMCR0_EN1 0x0000000000000002UL diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 34fd70488d83..c7a8bfc9f6f5 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -350,8 +350,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg) (devfn << 8) | (reg & 0xff); } -extern void __cpuinit rtas_give_timebase(void); -extern void __cpuinit rtas_take_timebase(void); +extern void rtas_give_timebase(void); +extern void rtas_take_timebase(void); #ifdef CONFIG_PPC_RTAS static inline int page_is_rtas_user_buf(unsigned long pfn) diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 200d763a0a67..49a13e0ef234 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -67,4 +67,18 @@ static inline void flush_spe_to_thread(struct task_struct *t) } #endif +static inline void clear_task_ebb(struct task_struct *t) +{ +#ifdef CONFIG_PPC_BOOK3S_64 + /* EBB perf events are not inherited, so clear all EBB state. */ + t->thread.bescr = 0; + t->thread.mmcr2 = 0; + t->thread.mmcr0 = 0; + t->thread.siar = 0; + t->thread.sdar = 0; + t->thread.sier = 0; + t->thread.used_ebb = 0; +#endif +} + #endif /* _ASM_POWERPC_SWITCH_TO_H */ diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index 61a59271665b..2def01ed0cb2 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -165,7 +165,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, /* Private function for use by PCI IO mapping code */ extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end); - +extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr); #else #error Unsupported MMU type #endif diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 50f261bc3e95..0d9cecddf8a4 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -22,7 +22,7 @@ extern unsigned long vdso64_rt_sigtramp; extern unsigned long vdso32_sigtramp; extern unsigned long vdso32_rt_sigtramp; -int __cpuinit vdso_getcpu_init(void); +int vdso_getcpu_init(void); #else /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a36daf3c6f9a..a6d74467c9ed 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -81,4 +81,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index f960a7944553..a8619bfe879e 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -58,6 +58,8 @@ obj-$(CONFIG_RTAS_PROC) += rtas-proc.o obj-$(CONFIG_LPARCFG) += lparcfg.o obj-$(CONFIG_IBMVIO) += vio.o obj-$(CONFIG_IBMEBUS) += ibmebus.o +obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ + eeh_driver.o eeh_event.o eeh_sysfs.o obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_FA_DUMP) += fadump.o 
@@ -100,7 +102,7 @@ obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o -pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o +pci64-$(CONFIG_PPC64) += pci_dn.o pci-hotplug.o isa-bridge.o obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \ pci-common.o pci_of_scan.o obj-$(CONFIG_PCI_MSI) += msi.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index a67c76ef5feb..26098c20936d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -105,9 +105,6 @@ int main(void) DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); #else /* CONFIG_PPC64 */ DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); -#endif #ifdef CONFIG_SPE DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); @@ -115,6 +112,9 @@ int main(void) DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ +#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) + DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); +#endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); #endif @@ -132,7 +132,6 @@ int main(void) DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier)); DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0)); DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2)); - DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra)); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 92c6b008dd2b..9262cf2bec4b 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -131,7 +131,8 @@ static const char *cache_type_string(const struct cache *cache) return cache_type_info[cache->type].name; } -static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode) +static void cache_init(struct cache *cache, int type, int level, + struct device_node *ofnode) { cache->type = type; cache->level = level; @@ -140,7 +141,7 @@ static void __cpuinit cache_init(struct cache *cache, int type, int level, struc list_add(&cache->list, &cache_list); } -static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode) +static struct cache *new_cache(int type, int level, struct device_node *ofnode) { struct cache *cache; @@ -324,7 +325,8 @@ static bool cache_node_is_unified(const struct device_node *np) return of_get_property(np, "cache-unified", NULL); } -static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level) +static struct cache *cache_do_one_devnode_unified(struct device_node *node, + int level) { struct cache *cache; @@ -335,7 +337,8 @@ static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node * return cache; } -static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level) +static struct cache *cache_do_one_devnode_split(struct device_node *node, + int level) { struct cache *dcache, *icache; @@ -357,7 +360,7 @@ err: return NULL; } -static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level) +static struct cache 
*cache_do_one_devnode(struct device_node *node, int level) { struct cache *cache; @@ -369,7 +372,8 @@ static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, in return cache; } -static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level) +static struct cache *cache_lookup_or_instantiate(struct device_node *node, + int level) { struct cache *cache; @@ -385,7 +389,7 @@ static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *n return cache; } -static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger) +static void link_cache_lists(struct cache *smaller, struct cache *bigger) { while (smaller->next_local) { if (smaller->next_local == bigger) @@ -396,13 +400,13 @@ static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigg smaller->next_local = bigger; } -static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache) +static void do_subsidiary_caches_debugcheck(struct cache *cache) { WARN_ON_ONCE(cache->level != 1); WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu")); } -static void __cpuinit do_subsidiary_caches(struct cache *cache) +static void do_subsidiary_caches(struct cache *cache) { struct device_node *subcache_node; int level = cache->level; @@ -423,7 +427,7 @@ static void __cpuinit do_subsidiary_caches(struct cache *cache) } } -static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id) +static struct cache *cache_chain_instantiate(unsigned int cpu_id) { struct device_node *cpu_node; struct cache *cpu_cache = NULL; @@ -448,7 +452,7 @@ out: return cpu_cache; } -static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id) +static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id) { struct cache_dir *cache_dir; struct device *dev; @@ -653,7 +657,7 @@ static struct kobj_type cache_index_type = { .default_attrs = cache_index_default_attrs, }; -static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) +static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) { const char *cache_name; const char *cache_type; @@ -696,7 +700,8 @@ static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *d kfree(buf); } -static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir) +static void cacheinfo_create_index_dir(struct cache *cache, int index, + struct cache_dir *cache_dir) { struct cache_index_dir *index_dir; int rc; @@ -722,7 +727,8 @@ err: kfree(index_dir); } -static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list) +static void cacheinfo_sysfs_populate(unsigned int cpu_id, + struct cache *cache_list) { struct cache_dir *cache_dir; struct cache *cache; @@ -740,7 +746,7 @@ static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache } } -void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id) +void cacheinfo_cpu_online(unsigned int cpu_id) { struct cache *cache; diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 9ec3fe174cba..779a78c26435 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -69,16 +69,6 @@ void __init setup_kdump_trampoline(void) } #endif /* CONFIG_NONSTATIC_KERNEL */ -static int __init parse_savemaxmem(char *p) -{ - if (p) - saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; - - return 1; -} -__setup("savemaxmem=", parse_savemaxmem); - - static size_t 
copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, unsigned long offset, int userbuf) { diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/kernel/eeh.c index 6b73d6c44f51..39954fe941b8 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -103,11 +103,8 @@ EXPORT_SYMBOL(eeh_subsystem_enabled); */ int eeh_probe_mode; -/* Global EEH mutex */ -DEFINE_MUTEX(eeh_mutex); - /* Lock to avoid races due to multiple reports of an error */ -static DEFINE_RAW_SPINLOCK(confirm_error_lock); +DEFINE_RAW_SPINLOCK(confirm_error_lock); /* Buffer for reporting pci register dumps. Its here in BSS, and * not dynamically alloced, so that it ends up in RMO where RTAS @@ -235,16 +232,30 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) { size_t loglen = 0; struct eeh_dev *edev; + bool valid_cfg_log = true; - eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); - eeh_ops->configure_bridge(pe); - eeh_pe_restore_bars(pe); - - pci_regs_buf[0] = 0; - eeh_pe_for_each_dev(pe, edev) { - loglen += eeh_gather_pci_data(edev, pci_regs_buf, - EEH_PCI_REGS_LOG_LEN); - } + /* + * When the PHB is fenced or dead, it's pointless to collect + * the data from PCI config space because it should return + * 0xFF's. For ER, we still retrieve the data from the PCI + * config space. + */ + if (eeh_probe_mode_dev() && + (pe->type & EEH_PE_PHB) && + (pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD))) + valid_cfg_log = false; + + if (valid_cfg_log) { + eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); + eeh_ops->configure_bridge(pe); + eeh_pe_restore_bars(pe); + + pci_regs_buf[0] = 0; + eeh_pe_for_each_dev(pe, edev) { + loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen, + EEH_PCI_REGS_LOG_LEN - loglen); + } + } eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); } @@ -260,15 +271,74 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) { pte_t *ptep; unsigned long pa; + int hugepage_shift; - ptep = find_linux_pte(init_mm.pgd, token); + /* + * We won't find hugepages here, iomem + */ + ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift); if (!ptep) return token; + WARN_ON(hugepage_shift); pa = pte_pfn(*ptep) << PAGE_SHIFT; return pa | (token & (PAGE_SIZE-1)); } +/* + * On PowerNV platform, we might already have fenced PHB there. + * For that case, it's meaningless to recover frozen PE. Intead, + * We have to handle fenced PHB firstly. 
+ */ +static int eeh_phb_check_failure(struct eeh_pe *pe) +{ + struct eeh_pe *phb_pe; + unsigned long flags; + int ret; + + if (!eeh_probe_mode_dev()) + return -EPERM; + + /* Find the PHB PE */ + phb_pe = eeh_phb_pe_get(pe->phb); + if (!phb_pe) { + pr_warning("%s Can't find PE for PHB#%d\n", + __func__, pe->phb->global_number); + return -EEXIST; + } + + /* If the PHB has been in problematic state */ + eeh_serialize_lock(&flags); + if (phb_pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD)) { + ret = 0; + goto out; + } + + /* Check PHB state */ + ret = eeh_ops->get_state(phb_pe, NULL); + if ((ret < 0) || + (ret == EEH_STATE_NOT_SUPPORT) || + (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == + (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { + ret = 0; + goto out; + } + + /* Isolate the PHB and send event */ + eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); + eeh_serialize_unlock(flags); + eeh_send_failure_event(phb_pe); + + pr_err("EEH: PHB#%x failure detected\n", + phb_pe->phb->global_number); + dump_stack(); + + return 1; +out: + eeh_serialize_unlock(flags); + return ret; +} + /** * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze * @edev: eeh device @@ -319,13 +389,21 @@ int eeh_dev_check_failure(struct eeh_dev *edev) return 0; } + /* + * On PowerNV platform, we might already have fenced PHB + * there and we need take care of that firstly. + */ + ret = eeh_phb_check_failure(pe); + if (ret > 0) + return ret; + /* If we already have a pending isolation event for this * slot, we know it's bad already, we don't need to check. * Do this checking under a lock; as multiple PCI devices * in one slot might report errors simultaneously, and we * only want one error recovery routine running. */ - raw_spin_lock_irqsave(&confirm_error_lock, flags); + eeh_serialize_lock(&flags); rc = 1; if (pe->state & EEH_PE_ISOLATED) { pe->check_count++; @@ -368,13 +446,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev) } eeh_stats.slot_resets++; - + /* Avoid repeated reports of this failure, including problems * with other functions on this device, and functions under * bridges. */ eeh_pe_state_mark(pe, EEH_PE_ISOLATED); - raw_spin_unlock_irqrestore(&confirm_error_lock, flags); + eeh_serialize_unlock(flags); eeh_send_failure_event(pe); @@ -382,11 +460,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * a stack trace will help the device-driver authors figure * out what happened. So print that out. */ - WARN(1, "EEH: failure detected\n"); + pr_err("EEH: Frozen PE#%x detected on PHB#%x\n", + pe->addr, pe->phb->global_number); + dump_stack(); + return 1; dn_unlock: - raw_spin_unlock_irqrestore(&confirm_error_lock, flags); + eeh_serialize_unlock(flags); return rc; } @@ -525,7 +606,7 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) * or a fundamental reset (3). * A fundamental reset required by any device under * Partitionable Endpoint trumps hot-reset. - */ + */ eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset); if (freset) @@ -538,8 +619,8 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) */ #define PCI_BUS_RST_HOLD_TIME_MSEC 250 msleep(PCI_BUS_RST_HOLD_TIME_MSEC); - - /* We might get hit with another EEH freeze as soon as the + + /* We might get hit with another EEH freeze as soon as the * pci slot reset line is dropped. Make sure we don't miss * these, and clear the flag now. 
*/ @@ -565,6 +646,7 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) */ int eeh_reset_pe(struct eeh_pe *pe) { + int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); int i, rc; /* Take three shots at resetting the bus */ @@ -572,7 +654,7 @@ int eeh_reset_pe(struct eeh_pe *pe) eeh_reset_pe_once(pe); rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); - if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) + if ((rc & flags) == flags) return 0; if (rc < 0) { @@ -604,7 +686,7 @@ void eeh_save_bars(struct eeh_dev *edev) if (!edev) return; dn = eeh_dev_to_of_node(edev); - + for (i = 0; i < 16; i++) eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); } @@ -674,11 +756,21 @@ int __exit eeh_ops_unregister(const char *name) * Even if force-off is set, the EEH hardware is still enabled, so that * newer systems can boot. */ -static int __init eeh_init(void) +int eeh_init(void) { struct pci_controller *hose, *tmp; struct device_node *phb; - int ret; + static int cnt = 0; + int ret = 0; + + /* + * We have to delay the initialization on PowerNV after + * the PCI hierarchy tree has been built because the PEs + * are figured out based on PCI devices instead of device + * tree nodes + */ + if (machine_is(powernv) && cnt++ <= 0) + return ret; /* call platform initialization function */ if (!eeh_ops) { @@ -691,7 +783,10 @@ static int __init eeh_init(void) return ret; } - raw_spin_lock_init(&confirm_error_lock); + /* Initialize EEH event */ + ret = eeh_event_init(); + if (ret) + return ret; /* Enable EEH for all adapters */ if (eeh_probe_mode_devtree()) { @@ -700,6 +795,25 @@ static int __init eeh_init(void) phb = hose->dn; traverse_pci_devices(phb, eeh_ops->of_probe, NULL); } + } else if (eeh_probe_mode_dev()) { + list_for_each_entry_safe(hose, tmp, + &hose_list, list_node) + pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL); + } else { + pr_warning("%s: Invalid probe mode %d\n", + __func__, eeh_probe_mode); + return -EINVAL; + } + + /* + * Call platform post-initialization. Actually, It's good chance + * to inform platform that EEH is ready to supply service if the + * I/O cache stuff has been built up. + */ + if (eeh_ops->post_init) { + ret = eeh_ops->post_init(); + if (ret) + return ret; } if (eeh_subsystem_enabled) @@ -728,6 +842,14 @@ static void eeh_add_device_early(struct device_node *dn) { struct pci_controller *phb; + /* + * If we're doing EEH probe based on PCI device, we + * would delay the probe until late stage because + * the PCI device isn't available this moment. + */ + if (!eeh_probe_mode_devtree()) + return; + if (!of_node_to_eeh_dev(dn)) return; phb = of_node_to_eeh_dev(dn)->phb; @@ -736,7 +858,6 @@ static void eeh_add_device_early(struct device_node *dn) if (NULL == phb || 0 == phb->buid) return; - /* FIXME: hotplug support on POWERNV */ eeh_ops->of_probe(dn, NULL); } @@ -787,6 +908,13 @@ static void eeh_add_device_late(struct pci_dev *dev) edev->pdev = dev; dev->dev.archdata.edev = edev; + /* + * We have to do the EEH probe here because the PCI device + * hasn't been created yet in the early stage. 
+ */ + if (eeh_probe_mode_dev()) + eeh_ops->dev_probe(dev, NULL); + eeh_addr_cache_insert_dev(dev); } @@ -803,12 +931,12 @@ void eeh_add_device_tree_late(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - eeh_add_device_late(dev); - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - struct pci_bus *subbus = dev->subordinate; - if (subbus) - eeh_add_device_tree_late(subbus); - } + eeh_add_device_late(dev); + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + struct pci_bus *subbus = dev->subordinate; + if (subbus) + eeh_add_device_tree_late(subbus); + } } } EXPORT_SYMBOL_GPL(eeh_add_device_tree_late); diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index 5ce3ba7ad137..f9ac1232a746 100644 --- a/arch/powerpc/platforms/pseries/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c @@ -194,7 +194,7 @@ static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) } /* Skip any devices for which EEH is not enabled. */ - if (!edev->pe) { + if (!eeh_probe_mode_dev() && !edev->pe) { #ifdef DEBUG pr_info("PCI: skip building address cache for=%s - %s\n", pci_name(dev), dn->full_name); @@ -285,7 +285,7 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev) * Must be run late in boot process, after the pci controllers * have been scanned for devices (after all device resources are known). */ -void __init eeh_addr_cache_build(void) +void eeh_addr_cache_build(void) { struct device_node *dn; struct eeh_dev *edev; @@ -316,4 +316,3 @@ void __init eeh_addr_cache_build(void) eeh_addr_cache_print(&pci_io_addr_cache_root); #endif } - diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index 1efa28f5fc54..1efa28f5fc54 100644 --- a/arch/powerpc/platforms/pseries/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index a3fefb61097c..2b1ce17cae50 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -154,9 +154,9 @@ static void eeh_enable_irq(struct pci_dev *dev) * eeh_report_error - Report pci error to each device driver * @data: eeh device * @userdata: return value - * - * Report an EEH error to each device driver, collect up and - * merge the device driver responses. Cumulative response + * + * Report an EEH error to each device driver, collect up and + * merge the device driver responses. Cumulative response * passed back in "userdata". */ static void *eeh_report_error(void *data, void *userdata) @@ -349,10 +349,12 @@ static void *eeh_report_failure(void *data, void *userdata) */ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) { + struct timeval tstamp; int cnt, rc; /* pcibios will clear the counter; save the value */ cnt = pe->freeze_count; + tstamp = pe->tstamp; /* * We don't remove the corresponding PE instances because @@ -376,15 +378,17 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) eeh_pe_restore_bars(pe); /* Give the system 5 seconds to finish running the user-space - * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, - * this is a hack, but if we don't do this, and try to bring - * the device up before the scripts have taken it down, + * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, + * this is a hack, but if we don't do this, and try to bring + * the device up before the scripts have taken it down, * potentially weird things happen. 
*/ if (bus) { ssleep(5); pcibios_add_pci_devices(bus); } + + pe->tstamp = tstamp; pe->freeze_count = cnt; return 0; @@ -395,24 +399,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) */ #define MAX_WAIT_FOR_RECOVERY 150 -/** - * eeh_handle_event - Reset a PCI device after hard lockup. - * @pe: EEH PE - * - * While PHB detects address or data parity errors on particular PCI - * slot, the associated PE will be frozen. Besides, DMA's occurring - * to wild addresses (which usually happen due to bugs in device - * drivers or in PCI adapter firmware) can cause EEH error. #SERR, - * #PERR or other misc PCI-related errors also can trigger EEH errors. - * - * Recovery process consists of unplugging the device driver (which - * generated hotplug events to userspace), then issuing a PCI #RST to - * the device, then reconfiguring the PCI config space for all bridges - * & devices under this slot, and then finally restarting the device - * drivers (which cause a second set of hotplug events to go out to - * userspace). - */ -void eeh_handle_event(struct eeh_pe *pe) +static void eeh_handle_normal_event(struct eeh_pe *pe) { struct pci_bus *frozen_bus; int rc = 0; @@ -425,6 +412,7 @@ void eeh_handle_event(struct eeh_pe *pe) return; } + eeh_pe_update_time_stamp(pe); pe->freeze_count++; if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) goto excess_failures; @@ -437,6 +425,7 @@ void eeh_handle_event(struct eeh_pe *pe) * status ... if any child can't handle the reset, then the entire * slot is dlpar removed and added. */ + pr_info("EEH: Notify device drivers to shutdown\n"); eeh_pe_dev_traverse(pe, eeh_report_error, &result); /* Get the current PCI slot state. This can take a long time, @@ -444,7 +433,7 @@ void eeh_handle_event(struct eeh_pe *pe) */ rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { - printk(KERN_WARNING "EEH: Permanent failure\n"); + pr_warning("EEH: Permanent failure\n"); goto hard_fail; } @@ -452,6 +441,7 @@ void eeh_handle_event(struct eeh_pe *pe) * don't post the error log until after all dev drivers * have been informed. */ + pr_info("EEH: Collect temporary log\n"); eeh_slot_error_detail(pe, EEH_LOG_TEMP); /* If all device drivers were EEH-unaware, then shut @@ -459,15 +449,18 @@ void eeh_handle_event(struct eeh_pe *pe) * go down willingly, without panicing the system. 
*/ if (result == PCI_ERS_RESULT_NONE) { + pr_info("EEH: Reset with hotplug activity\n"); rc = eeh_reset_device(pe, frozen_bus); if (rc) { - printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); + pr_warning("%s: Unable to reset, err=%d\n", + __func__, rc); goto hard_fail; } } /* If all devices reported they can proceed, then re-enable MMIO */ if (result == PCI_ERS_RESULT_CAN_RECOVER) { + pr_info("EEH: Enable I/O for affected devices\n"); rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); if (rc < 0) @@ -475,6 +468,7 @@ void eeh_handle_event(struct eeh_pe *pe) if (rc) { result = PCI_ERS_RESULT_NEED_RESET; } else { + pr_info("EEH: Notify device drivers to resume I/O\n"); result = PCI_ERS_RESULT_NONE; eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); } @@ -482,6 +476,7 @@ void eeh_handle_event(struct eeh_pe *pe) /* If all devices reported they can proceed, then re-enable DMA */ if (result == PCI_ERS_RESULT_CAN_RECOVER) { + pr_info("EEH: Enabled DMA for affected devices\n"); rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); if (rc < 0) @@ -494,17 +489,22 @@ void eeh_handle_event(struct eeh_pe *pe) /* If any device has a hard failure, then shut off everything. */ if (result == PCI_ERS_RESULT_DISCONNECT) { - printk(KERN_WARNING "EEH: Device driver gave up\n"); + pr_warning("EEH: Device driver gave up\n"); goto hard_fail; } /* If any device called out for a reset, then reset the slot */ if (result == PCI_ERS_RESULT_NEED_RESET) { + pr_info("EEH: Reset without hotplug activity\n"); rc = eeh_reset_device(pe, NULL); if (rc) { - printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); + pr_warning("%s: Cannot reset, err=%d\n", + __func__, rc); goto hard_fail; } + + pr_info("EEH: Notify device drivers " + "the completion of reset\n"); result = PCI_ERS_RESULT_NONE; eeh_pe_dev_traverse(pe, eeh_report_reset, &result); } @@ -512,15 +512,16 @@ void eeh_handle_event(struct eeh_pe *pe) /* All devices should claim they have recovered by now. */ if ((result != PCI_ERS_RESULT_RECOVERED) && (result != PCI_ERS_RESULT_NONE)) { - printk(KERN_WARNING "EEH: Not recovered\n"); + pr_warning("EEH: Not recovered\n"); goto hard_fail; } /* Tell all device drivers that they can resume operations */ + pr_info("EEH: Notify device driver to resume\n"); eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); return; - + excess_failures: /* * About 90% of all real-life EEH failures in the field @@ -550,3 +551,111 @@ perm_error: pcibios_remove_pci_devices(frozen_bus); } +static void eeh_handle_special_event(void) +{ + struct eeh_pe *pe, *phb_pe; + struct pci_bus *bus; + struct pci_controller *hose, *tmp; + unsigned long flags; + int rc = 0; + + /* + * The return value from next_error() has been classified as follows. + * It might be good to enumerate them. However, next_error() is only + * supported by PowerNV platform for now. 
So it would be fine to use + * integer directly: + * + * 4 - Dead IOC 3 - Dead PHB + * 2 - Fenced PHB 1 - Frozen PE + * 0 - No error found + * + */ + rc = eeh_ops->next_error(&pe); + if (rc <= 0) + return; + + switch (rc) { + case 4: + /* Mark all PHBs in dead state */ + eeh_serialize_lock(&flags); + list_for_each_entry_safe(hose, tmp, + &hose_list, list_node) { + phb_pe = eeh_phb_pe_get(hose); + if (!phb_pe) continue; + + eeh_pe_state_mark(phb_pe, + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); + } + eeh_serialize_unlock(flags); + + /* Purge all events */ + eeh_remove_event(NULL); + break; + case 3: + case 2: + case 1: + /* Mark the PE in fenced state */ + eeh_serialize_lock(&flags); + if (rc == 3) + eeh_pe_state_mark(pe, + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); + else + eeh_pe_state_mark(pe, + EEH_PE_ISOLATED | EEH_PE_RECOVERING); + eeh_serialize_unlock(flags); + + /* Purge all events of the PHB */ + eeh_remove_event(pe); + break; + default: + pr_err("%s: Invalid value %d from next_error()\n", + __func__, rc); + return; + } + + /* + * For fenced PHB and frozen PE, it's handled as normal + * event. We have to remove the affected PHBs for dead + * PHB and IOC + */ + if (rc == 2 || rc == 1) + eeh_handle_normal_event(pe); + else { + list_for_each_entry_safe(hose, tmp, + &hose_list, list_node) { + phb_pe = eeh_phb_pe_get(hose); + if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD)) + continue; + + bus = eeh_pe_bus_get(phb_pe); + /* Notify all devices that they're about to go down. */ + eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); + pcibios_remove_pci_devices(bus); + } + } +} + +/** + * eeh_handle_event - Reset a PCI device after hard lockup. + * @pe: EEH PE + * + * While PHB detects address or data parity errors on particular PCI + * slot, the associated PE will be frozen. Besides, DMA's occurring + * to wild addresses (which usually happen due to bugs in device + * drivers or in PCI adapter firmware) can cause EEH error. #SERR, + * #PERR or other misc PCI-related errors also can trigger EEH errors. + * + * Recovery process consists of unplugging the device driver (which + * generated hotplug events to userspace), then issuing a PCI #RST to + * the device, then reconfiguring the PCI config space for all bridges + * & devices under this slot, and then finally restarting the device + * drivers (which cause a second set of hotplug events to go out to + * userspace). + */ +void eeh_handle_event(struct eeh_pe *pe) +{ + if (pe) + eeh_handle_normal_event(pe); + else + eeh_handle_special_event(); +} diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 185bedd926df..d27c5afc90ae 100644 --- a/arch/powerpc/platforms/pseries/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -18,11 +18,10 @@ #include <linux/delay.h> #include <linux/list.h> -#include <linux/mutex.h> #include <linux/sched.h> +#include <linux/semaphore.h> #include <linux/pci.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/kthread.h> #include <asm/eeh_event.h> #include <asm/ppc-pci.h> @@ -35,14 +34,9 @@ * work-queue, where a worker thread can drive recovery. */ -/* EEH event workqueue setup. */ static DEFINE_SPINLOCK(eeh_eventlist_lock); +static struct semaphore eeh_eventlist_sem; LIST_HEAD(eeh_eventlist); -static void eeh_thread_launcher(struct work_struct *); -DECLARE_WORK(eeh_event_wq, eeh_thread_launcher); - -/* Serialize reset sequences for a given pci device */ -DEFINE_MUTEX(eeh_event_mutex); /** * eeh_event_handler - Dispatch EEH events. 
@@ -60,55 +54,63 @@ static int eeh_event_handler(void * dummy) struct eeh_event *event; struct eeh_pe *pe; - spin_lock_irqsave(&eeh_eventlist_lock, flags); - event = NULL; - - /* Unqueue the event, get ready to process. */ - if (!list_empty(&eeh_eventlist)) { - event = list_entry(eeh_eventlist.next, struct eeh_event, list); - list_del(&event->list); - } - spin_unlock_irqrestore(&eeh_eventlist_lock, flags); - - if (event == NULL) - return 0; - - /* Serialize processing of EEH events */ - mutex_lock(&eeh_event_mutex); - pe = event->pe; - eeh_pe_state_mark(pe, EEH_PE_RECOVERING); - pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n", - pe->phb->global_number, pe->addr); - - set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */ - eeh_handle_event(pe); - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); - - kfree(event); - mutex_unlock(&eeh_event_mutex); - - /* If there are no new errors after an hour, clear the counter. */ - if (pe && pe->freeze_count > 0) { - msleep_interruptible(3600*1000); - if (pe->freeze_count > 0) - pe->freeze_count--; - + while (!kthread_should_stop()) { + if (down_interruptible(&eeh_eventlist_sem)) + break; + + /* Fetch EEH event from the queue */ + spin_lock_irqsave(&eeh_eventlist_lock, flags); + event = NULL; + if (!list_empty(&eeh_eventlist)) { + event = list_entry(eeh_eventlist.next, + struct eeh_event, list); + list_del(&event->list); + } + spin_unlock_irqrestore(&eeh_eventlist_lock, flags); + if (!event) + continue; + + /* We might have event without binding PE */ + pe = event->pe; + if (pe) { + eeh_pe_state_mark(pe, EEH_PE_RECOVERING); + pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n", + pe->phb->global_number, pe->addr); + eeh_handle_event(pe); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + } else { + eeh_handle_event(NULL); + } + + kfree(event); } return 0; } /** - * eeh_thread_launcher - Start kernel thread to handle EEH events - * @dummy - unused + * eeh_event_init - Start kernel thread to handle EEH events * * This routine is called to start the kernel thread for processing * EEH event. */ -static void eeh_thread_launcher(struct work_struct *dummy) +int eeh_event_init(void) { - if (IS_ERR(kthread_run(eeh_event_handler, NULL, "eehd"))) - printk(KERN_ERR "Failed to start EEH daemon\n"); + struct task_struct *t; + int ret = 0; + + /* Initialize semaphore */ + sema_init(&eeh_eventlist_sem, 0); + + t = kthread_run(eeh_event_handler, NULL, "eehd"); + if (IS_ERR(t)) { + ret = PTR_ERR(t); + pr_err("%s: Failed to start EEH daemon (%d)\n", + __func__, ret); + return ret; + } + + return 0; } /** @@ -136,7 +138,45 @@ int eeh_send_failure_event(struct eeh_pe *pe) list_add(&event->list, &eeh_eventlist); spin_unlock_irqrestore(&eeh_eventlist_lock, flags); - schedule_work(&eeh_event_wq); + /* For EEH deamon to knick in */ + up(&eeh_eventlist_sem); return 0; } + +/** + * eeh_remove_event - Remove EEH event from the queue + * @pe: Event binding to the PE + * + * On PowerNV platform, we might have subsequent coming events + * is part of the former one. For that case, those subsequent + * coming events are totally duplicated and unnecessary, thus + * they should be removed. + */ +void eeh_remove_event(struct eeh_pe *pe) +{ + unsigned long flags; + struct eeh_event *event, *tmp; + + spin_lock_irqsave(&eeh_eventlist_lock, flags); + list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) { + /* + * If we don't have valid PE passed in, that means + * we already have event corresponding to dead IOC + * and all events should be purged. 
+ */ + if (!pe) { + list_del(&event->list); + kfree(event); + } else if (pe->type & EEH_PE_PHB) { + if (event->pe && event->pe->phb == pe->phb) { + list_del(&event->list); + kfree(event); + } + } else if (event->pe == pe) { + list_del(&event->list); + kfree(event); + } + } + spin_unlock_irqrestore(&eeh_eventlist_lock, flags); +} diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 9d4a9e8562b2..016588a6f5ed 100644 --- a/arch/powerpc/platforms/pseries/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -22,6 +22,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/delay.h> #include <linux/export.h> #include <linux/gfp.h> #include <linux/init.h> @@ -78,9 +79,7 @@ int eeh_phb_pe_create(struct pci_controller *phb) } /* Put it into the list */ - eeh_lock(); list_add_tail(&pe->child, &eeh_phb_pe); - eeh_unlock(); pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number); @@ -95,7 +94,7 @@ int eeh_phb_pe_create(struct pci_controller *phb) * hierarchy tree is composed of PHB PEs. The function is used * to retrieve the corresponding PHB PE according to the given PHB. */ -static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb) +struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb) { struct eeh_pe *pe; @@ -185,21 +184,15 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root, return NULL; } - eeh_lock(); - /* Traverse root PE */ for (pe = root; pe; pe = eeh_pe_next(pe, root)) { eeh_pe_for_each_dev(pe, edev) { ret = fn(edev, flag); - if (ret) { - eeh_unlock(); + if (ret) return ret; - } } } - eeh_unlock(); - return NULL; } @@ -228,7 +221,7 @@ static void *__eeh_pe_get(void *data, void *flag) return pe; /* Try BDF address */ - if (edev->pe_config_addr && + if (edev->config_addr && (edev->config_addr == pe->config_addr)) return pe; @@ -246,7 +239,7 @@ static void *__eeh_pe_get(void *data, void *flag) * which is composed of PCI bus/device/function number, or unified * PE address. */ -static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) +struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) { struct eeh_pe *root = eeh_phb_pe_get(edev->phb); struct eeh_pe *pe; @@ -305,8 +298,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) { struct eeh_pe *pe, *parent; - eeh_lock(); - /* * Search the PE has been existing or not according * to the PE address. If that has been existing, the @@ -316,7 +307,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) pe = eeh_pe_get(edev); if (pe && !(pe->type & EEH_PE_INVALID)) { if (!edev->pe_config_addr) { - eeh_unlock(); pr_err("%s: PE with addr 0x%x already exists\n", __func__, edev->config_addr); return -EEXIST; @@ -328,7 +318,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) /* Put the edev to PE */ list_add_tail(&edev->list, &pe->edevs); - eeh_unlock(); pr_debug("EEH: Add %s to Bus PE#%x\n", edev->dn->full_name, pe->addr); @@ -347,7 +336,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) parent->type &= ~EEH_PE_INVALID; parent = parent->parent; } - eeh_unlock(); pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", edev->dn->full_name, pe->addr, pe->parent->addr); @@ -357,7 +345,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) /* Create a new EEH PE */ pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE); if (!pe) { - eeh_unlock(); pr_err("%s: out of memory!\n", __func__); return -ENOMEM; } @@ -365,6 +352,17 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) pe->config_addr = edev->config_addr; /* + * While doing PE reset, we probably hot-reset the + * upstream bridge. 
However, the PCI devices including + * the associated EEH devices might be removed when EEH + * core is doing recovery. So that won't safe to retrieve + * the bridge through downstream EEH device. We have to + * trace the parent PCI bus, then the upstream bridge. + */ + if (eeh_probe_mode_dev()) + pe->bus = eeh_dev_to_pci_dev(edev)->bus; + + /* * Put the new EEH PE into hierarchy tree. If the parent * can't be found, the newly created PE will be attached * to PHB directly. Otherwise, we have to associate the @@ -374,7 +372,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) if (!parent) { parent = eeh_phb_pe_get(edev->phb); if (!parent) { - eeh_unlock(); pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", __func__, edev->phb->global_number); edev->pe = NULL; @@ -391,7 +388,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) list_add_tail(&pe->child, &parent->child_list); list_add_tail(&edev->list, &pe->edevs); edev->pe = pe; - eeh_unlock(); pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", edev->dn->full_name, pe->addr, pe->parent->addr); @@ -419,8 +415,6 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) return -EEXIST; } - eeh_lock(); - /* Remove the EEH device */ pe = edev->pe; edev->pe = NULL; @@ -465,12 +459,37 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) pe = parent; } - eeh_unlock(); - return 0; } /** + * eeh_pe_update_time_stamp - Update PE's frozen time stamp + * @pe: EEH PE + * + * We have time stamp for each PE to trace its time of getting + * frozen in last hour. The function should be called to update + * the time stamp on first error of the specific PE. On the other + * handle, we needn't account for errors happened in last hour. + */ +void eeh_pe_update_time_stamp(struct eeh_pe *pe) +{ + struct timeval tstamp; + + if (!pe) return; + + if (pe->freeze_count <= 0) { + pe->freeze_count = 0; + do_gettimeofday(&pe->tstamp); + } else { + do_gettimeofday(&tstamp); + if (tstamp.tv_sec - pe->tstamp.tv_sec > 3600) { + pe->tstamp = tstamp; + pe->freeze_count = 0; + } + } +} + +/** * __eeh_pe_state_mark - Mark the state for the PE * @data: EEH PE * @flag: state @@ -512,9 +531,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag) */ void eeh_pe_state_mark(struct eeh_pe *pe, int state) { - eeh_lock(); eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); - eeh_unlock(); } /** @@ -548,35 +565,135 @@ static void *__eeh_pe_state_clear(void *data, void *flag) */ void eeh_pe_state_clear(struct eeh_pe *pe, int state) { - eeh_lock(); eeh_pe_traverse(pe, __eeh_pe_state_clear, &state); - eeh_unlock(); } -/** - * eeh_restore_one_device_bars - Restore the Base Address Registers for one device - * @data: EEH device - * @flag: Unused +/* + * Some PCI bridges (e.g. PLX bridges) have primary/secondary + * buses assigned explicitly by firmware, and we probably have + * lost that after reset. So we have to delay the check until + * the PCI-CFG registers have been restored for the parent + * bridge. * - * Loads the PCI configuration space base address registers, - * the expansion ROM base address, the latency timer, and etc. - * from the saved values in the device node. + * Don't use normal PCI-CFG accessors, which probably has been + * blocked on normal path during the stage. So we need utilize + * eeh operations, which is always permitted. 
*/ -static void *eeh_restore_one_device_bars(void *data, void *flag) +static void eeh_bridge_check_link(struct pci_dev *pdev, + struct device_node *dn) +{ + int cap; + uint32_t val; + int timeout = 0; + + /* + * We only check root port and downstream ports of + * PCIe switches + */ + if (!pci_is_pcie(pdev) || + (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) + return; + + pr_debug("%s: Check PCIe link for %s ...\n", + __func__, pci_name(pdev)); + + /* Check slot status */ + cap = pdev->pcie_cap; + eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); + if (!(val & PCI_EXP_SLTSTA_PDS)) { + pr_debug(" No card in the slot (0x%04x) !\n", val); + return; + } + + /* Check power status if we have the capability */ + eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val); + if (val & PCI_EXP_SLTCAP_PCP) { + eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val); + if (val & PCI_EXP_SLTCTL_PCC) { + pr_debug(" In power-off state, power it on ...\n"); + val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); + val |= (0x0100 & PCI_EXP_SLTCTL_PIC); + eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val); + msleep(2 * 1000); + } + } + + /* Enable link */ + eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val); + val &= ~PCI_EXP_LNKCTL_LD; + eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val); + + /* Check link */ + eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val); + if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { + pr_debug(" No link reporting capability (0x%08x) \n", val); + msleep(1000); + return; + } + + /* Wait the link is up until timeout (5s) */ + timeout = 0; + while (timeout < 5000) { + msleep(20); + timeout += 20; + + eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val); + if (val & PCI_EXP_LNKSTA_DLLLA) + break; + } + + if (val & PCI_EXP_LNKSTA_DLLLA) + pr_debug(" Link up (%s)\n", + (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? 
"2.5GB" : "5GB"); + else + pr_debug(" Link not ready (0x%04x)\n", val); +} + +#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) +#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) + +static void eeh_restore_bridge_bars(struct pci_dev *pdev, + struct eeh_dev *edev, + struct device_node *dn) +{ + int i; + + /* + * Device BARs: 0x10 - 0x18 + * Bus numbers and windows: 0x18 - 0x30 + */ + for (i = 4; i < 13; i++) + eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); + /* Rom: 0x38 */ + eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]); + + /* Cache line & Latency timer: 0xC 0xD */ + eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, + SAVED_BYTE(PCI_CACHE_LINE_SIZE)); + eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, + SAVED_BYTE(PCI_LATENCY_TIMER)); + /* Max latency, min grant, interrupt ping and line: 0x3C */ + eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); + + /* PCI Command: 0x4 */ + eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); + + /* Check the PCIe link is ready */ + eeh_bridge_check_link(pdev, dn); +} + +static void eeh_restore_device_bars(struct eeh_dev *edev, + struct device_node *dn) { int i; u32 cmd; - struct eeh_dev *edev = (struct eeh_dev *)data; - struct device_node *dn = eeh_dev_to_of_node(edev); for (i = 4; i < 10; i++) eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); /* 12 == Expansion ROM Address */ eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); -#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) -#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) - eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, SAVED_BYTE(PCI_CACHE_LINE_SIZE)); eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, @@ -599,6 +716,34 @@ static void *eeh_restore_one_device_bars(void *data, void *flag) else cmd &= ~PCI_COMMAND_SERR; eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); +} + +/** + * eeh_restore_one_device_bars - Restore the Base Address Registers for one device + * @data: EEH device + * @flag: Unused + * + * Loads the PCI configuration space base address registers, + * the expansion ROM base address, the latency timer, and etc. + * from the saved values in the device node. 
+ */ +static void *eeh_restore_one_device_bars(void *data, void *flag) +{ + struct pci_dev *pdev = NULL; + struct eeh_dev *edev = (struct eeh_dev *)data; + struct device_node *dn = eeh_dev_to_of_node(edev); + + /* Trace the PCI bridge */ + if (eeh_probe_mode_dev()) { + pdev = eeh_dev_to_pci_dev(edev); + if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + pdev = NULL; + } + + if (pdev) + eeh_restore_bridge_bars(pdev, edev, dn); + else + eeh_restore_device_bars(edev, dn); return NULL; } @@ -635,19 +780,21 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) struct eeh_dev *edev; struct pci_dev *pdev; - eeh_lock(); - if (pe->type & EEH_PE_PHB) { bus = pe->phb->bus; } else if (pe->type & EEH_PE_BUS || pe->type & EEH_PE_DEVICE) { + if (pe->bus) { + bus = pe->bus; + goto out; + } + edev = list_first_entry(&pe->edevs, struct eeh_dev, list); pdev = eeh_dev_to_pci_dev(edev); if (pdev) bus = pdev->bus; } - eeh_unlock(); - +out: return bus; } diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index d37708360f2e..e7ae3484918c 100644 --- a/arch/powerpc/platforms/pseries/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c @@ -72,4 +72,3 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev) device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); } - diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 8741c854e03d..ab15b8d057ad 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -629,21 +629,43 @@ _GLOBAL(ret_from_except_lite) CURRENT_THREAD_INFO(r9, r1) ld r3,_MSR(r1) +#ifdef CONFIG_PPC_BOOK3E + ld r10,PACACURRENT(r13) +#endif /* CONFIG_PPC_BOOK3E */ ld r4,TI_FLAGS(r9) andi. r3,r3,MSR_PR beq resume_kernel +#ifdef CONFIG_PPC_BOOK3E + lwz r3,(THREAD+THREAD_DBCR0)(r10) +#endif /* CONFIG_PPC_BOOK3E */ /* Check current_thread_info()->flags */ andi. r0,r4,_TIF_USER_WORK_MASK +#ifdef CONFIG_PPC_BOOK3E + bne 1f + /* + * Check to see if the dbcr0 register is set up to debug. + * Use the internal debug mode bit to do this. + */ + andis. r0,r3,DBCR0_IDM@h beq restore - - andi. r0,r4,_TIF_NEED_RESCHED - beq 1f + mfmsr r0 + rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */ + mtmsr r0 + mtspr SPRN_DBCR0,r3 + li r10, -1 + mtspr SPRN_DBSR,r10 + b restore +#else + beq restore +#endif +1: andi. r0,r4,_TIF_NEED_RESCHED + beq 2f bl .restore_interrupts SCHEDULE_USER b .ret_from_except_lite -1: bl .save_nvgprs +2: bl .save_nvgprs bl .restore_interrupts addi r3,r1,STACK_FRAME_OVERHEAD bl .do_notify_resume diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 40e4a17c8ba0..4e00d223b2e3 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -341,10 +341,17 @@ vsx_unavailable_pSeries_1: EXCEPTION_PROLOG_0(PACA_EXGEN) b vsx_unavailable_pSeries +facility_unavailable_trampoline: . = 0xf60 SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXGEN) - b tm_unavailable_pSeries + b facility_unavailable_pSeries + +hv_facility_unavailable_trampoline: + . 
= 0xf80 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b facility_unavailable_hv #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) @@ -522,8 +529,10 @@ denorm_done: KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20) STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40) - STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable) + STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60) + STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82) /* * An interrupt came in while soft-disabled. We set paca->irq_happened, then: @@ -793,14 +802,10 @@ system_call_relon_pSeries: STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step) . = 0x4e00 - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_0(PACA_EXGEN) - b h_data_storage_relon_hv + b . /* Can't happen, see v2.07 Book III-S section 6.5 */ . = 0x4e20 - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_0(PACA_EXGEN) - b h_instr_storage_relon_hv + b . /* Can't happen, see v2.07 Book III-S section 6.5 */ . = 0x4e40 SET_SCRATCH0(r13) @@ -808,9 +813,7 @@ system_call_relon_pSeries: b emulation_assist_relon_hv . = 0x4e60 - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_0(PACA_EXGEN) - b hmi_exception_relon_hv + b . /* Can't happen, see v2.07 Book III-S section 6.5 */ . = 0x4e80 SET_SCRATCH0(r13) @@ -835,11 +838,17 @@ vsx_unavailable_relon_pSeries_1: EXCEPTION_PROLOG_0(PACA_EXGEN) b vsx_unavailable_relon_pSeries -tm_unavailable_relon_pSeries_1: +facility_unavailable_relon_trampoline: . = 0x4f60 SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXGEN) - b tm_unavailable_relon_pSeries + b facility_unavailable_relon_pSeries + +hv_facility_unavailable_relon_trampoline: + . = 0x4f80 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b facility_unavailable_relon_hv STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) #ifdef CONFIG_PPC_DENORMALISATION @@ -1165,36 +1174,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) bl .vsx_unavailable_exception b .ret_from_except - .align 7 - .globl tm_unavailable_common -tm_unavailable_common: - EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN) - bl .save_nvgprs - DISABLE_INTS - addi r3,r1,STACK_FRAME_OVERHEAD - bl .tm_unavailable_exception - b .ret_from_except + STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) .align 7 .globl __end_handlers __end_handlers: /* Equivalents to the above handlers for relocation-on interrupt vectors */ - STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage) - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00) - STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage) - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20) STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist) - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40) - STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception) - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60) MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell) - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80) STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor) STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) - STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable) + STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) + STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index a949bdfc9623..f0b47d1a6b0e 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ 
b/arch/powerpc/kernel/hw_breakpoint.c @@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ if ((bp->attr.bp_addr >> 10) != - ((bp->attr.bp_addr + bp->attr.bp_len) >> 10)) + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) return -EINVAL; } if (info->len > @@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) * we still need to single-step the instruction, but we don't * generate an event. */ + info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; if (!((bp->attr.bp_addr <= dar) && (dar - bp->attr.bp_addr < bp->attr.bp_len))) info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 8220baa46faf..16a7c2326d48 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -205,7 +205,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches) return ret; } -int ibmebus_register_driver(struct of_platform_driver *drv) +int ibmebus_register_driver(struct platform_driver *drv) { /* If the driver uses devices that ibmebus doesn't know, add them */ ibmebus_create_devices(drv->driver.of_match_table); @@ -215,7 +215,7 @@ int ibmebus_register_driver(struct of_platform_driver *drv) } EXPORT_SYMBOL(ibmebus_register_driver); -void ibmebus_unregister_driver(struct of_platform_driver *drv) +void ibmebus_unregister_driver(struct platform_driver *drv) { driver_unregister(&drv->driver); } @@ -338,11 +338,10 @@ static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) static int ibmebus_bus_device_probe(struct device *dev) { int error = -ENODEV; - struct of_platform_driver *drv; + struct platform_driver *drv; struct platform_device *of_dev; - const struct of_device_id *match; - drv = to_of_platform_driver(dev->driver); + drv = to_platform_driver(dev->driver); of_dev = to_platform_device(dev); if (!drv->probe) @@ -350,9 +349,8 @@ static int ibmebus_bus_device_probe(struct device *dev) of_dev_get(of_dev); - match = of_match_device(drv->driver.of_match_table, dev); - if (match) - error = drv->probe(of_dev, match); + if (of_driver_match_device(dev, dev->driver)) + error = drv->probe(of_dev); if (error) of_dev_put(of_dev); @@ -362,7 +360,7 @@ static int ibmebus_bus_device_probe(struct device *dev) static int ibmebus_bus_device_remove(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); if (dev->driver && drv->remove) drv->remove(of_dev); @@ -372,7 +370,7 @@ static int ibmebus_bus_device_remove(struct device *dev) static void ibmebus_bus_device_shutdown(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(of_dev); @@ -419,7 +417,7 @@ struct device_attribute ibmebus_bus_device_attrs[] = { static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); int ret = 0; if (dev->driver && drv->suspend) @@ -430,7 +428,7 @@ static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) static int 
ibmebus_bus_legacy_resume(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); int ret = 0; if (dev->driver && drv->resume) diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 939ea7ef0dc8..d7216c9abda1 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -85,7 +85,7 @@ int powersave_nap; /* * Register the sysctl to set/clear powersave_nap. */ -static ctl_table powersave_nap_ctl_table[]={ +static struct ctl_table powersave_nap_ctl_table[] = { { .procname = "powersave-nap", .data = &powersave_nap, @@ -95,7 +95,7 @@ static ctl_table powersave_nap_ctl_table[]={ }, {} }; -static ctl_table powersave_nap_sysctl_root[] = { +static struct ctl_table powersave_nap_sysctl_root[] = { { .procname = "kernel", .mode = 0555, diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 50e90b7e7139..fa0b54b2a362 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -55,6 +55,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) { + unsigned hugepage_shift; struct iowa_bus *bus; int token; @@ -70,11 +71,17 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) return NULL; - ptep = find_linux_pte(init_mm.pgd, vaddr); + ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr, + &hugepage_shift); if (ptep == NULL) paddr = 0; - else + else { + /* + * we don't have hugepages backing iomem + */ + WARN_ON(hugepage_shift); paddr = pte_pfn(*ptep) << PAGE_SHIFT; + } bus = iowa_pci_find(vaddr, paddr); if (bus == NULL) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index c0d0dbddfba1..b20ff173a671 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -36,6 +36,8 @@ #include <linux/hash.h> #include <linux/fault-inject.h> #include <linux/pci.h> +#include <linux/iommu.h> +#include <linux/sched.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/iommu.h> @@ -44,6 +46,7 @@ #include <asm/kdump.h> #include <asm/fadump.h> #include <asm/vio.h> +#include <asm/tce.h> #define DBG(...) 
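/*
 * Illustrative sketch (editor's addition, not taken from this commit):
 * with the ibmebus conversion shown earlier in this diff, client drivers
 * register a plain struct platform_driver, and their probe callback no
 * longer receives a matched of_device_id -- it is looked up explicitly
 * when needed.  The driver name, compatible string and probe/remove
 * bodies below are hypothetical; only the registration API reflects the
 * change above.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <asm/ibmebus.h>

static const struct of_device_id example_ebus_ids[] = {
	{ .compatible = "ibm,example-ebus-dev" },	/* hypothetical */
	{ }
};

static int example_ebus_probe(struct platform_device *ofdev)
{
	/* The match entry is no longer passed in; fetch it if required */
	if (!of_match_device(example_ebus_ids, &ofdev->dev))
		return -ENODEV;

	return 0;
}

static int example_ebus_remove(struct platform_device *ofdev)
{
	return 0;
}

static struct platform_driver example_ebus_driver = {
	.driver = {
		.name		= "example-ebus",
		.of_match_table	= example_ebus_ids,
	},
	.probe	= example_ebus_probe,
	.remove	= example_ebus_remove,
};

static int __init example_ebus_init(void)
{
	return ibmebus_register_driver(&example_ebus_driver);
}
module_init(example_ebus_init);

static void __exit example_ebus_exit(void)
{
	ibmebus_unregister_driver(&example_ebus_driver);
}
module_exit(example_ebus_exit);

MODULE_LICENSE("GPL");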
@@ -724,6 +727,13 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) if (tbl->it_offset == 0) clear_bit(0, tbl->it_map); +#ifdef CONFIG_IOMMU_API + if (tbl->it_group) { + iommu_group_put(tbl->it_group); + BUG_ON(tbl->it_group); + } +#endif + /* verify that table contains no entries */ if (!bitmap_empty(tbl->it_map, tbl->it_size)) pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name); @@ -860,3 +870,316 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } } + +#ifdef CONFIG_IOMMU_API +/* + * SPAPR TCE API + */ +static void group_release(void *iommu_data) +{ + struct iommu_table *tbl = iommu_data; + tbl->it_group = NULL; +} + +void iommu_register_group(struct iommu_table *tbl, + int pci_domain_number, unsigned long pe_num) +{ + struct iommu_group *grp; + char *name; + + grp = iommu_group_alloc(); + if (IS_ERR(grp)) { + pr_warn("powerpc iommu api: cannot create new group, err=%ld\n", + PTR_ERR(grp)); + return; + } + tbl->it_group = grp; + iommu_group_set_iommudata(grp, tbl, group_release); + name = kasprintf(GFP_KERNEL, "domain%d-pe%lx", + pci_domain_number, pe_num); + if (!name) + return; + iommu_group_set_name(grp, name); + kfree(name); +} + +enum dma_data_direction iommu_tce_direction(unsigned long tce) +{ + if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE)) + return DMA_BIDIRECTIONAL; + else if (tce & TCE_PCI_READ) + return DMA_TO_DEVICE; + else if (tce & TCE_PCI_WRITE) + return DMA_FROM_DEVICE; + else + return DMA_NONE; +} +EXPORT_SYMBOL_GPL(iommu_tce_direction); + +void iommu_flush_tce(struct iommu_table *tbl) +{ + /* Flush/invalidate TLB caches if necessary */ + if (ppc_md.tce_flush) + ppc_md.tce_flush(tbl); + + /* Make sure updates are seen by hardware */ + mb(); +} +EXPORT_SYMBOL_GPL(iommu_flush_tce); + +int iommu_tce_clear_param_check(struct iommu_table *tbl, + unsigned long ioba, unsigned long tce_value, + unsigned long npages) +{ + /* ppc_md.tce_free() does not support any value but 0 */ + if (tce_value) + return -EINVAL; + + if (ioba & ~IOMMU_PAGE_MASK) + return -EINVAL; + + ioba >>= IOMMU_PAGE_SHIFT; + if (ioba < tbl->it_offset) + return -EINVAL; + + if ((ioba + npages) > (tbl->it_offset + tbl->it_size)) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check); + +int iommu_tce_put_param_check(struct iommu_table *tbl, + unsigned long ioba, unsigned long tce) +{ + if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ))) + return -EINVAL; + + if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ)) + return -EINVAL; + + if (ioba & ~IOMMU_PAGE_MASK) + return -EINVAL; + + ioba >>= IOMMU_PAGE_SHIFT; + if (ioba < tbl->it_offset) + return -EINVAL; + + if ((ioba + 1) > (tbl->it_offset + tbl->it_size)) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_tce_put_param_check); + +unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry) +{ + unsigned long oldtce; + struct iommu_pool *pool = get_pool(tbl, entry); + + spin_lock(&(pool->lock)); + + oldtce = ppc_md.tce_get(tbl, entry); + if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)) + ppc_md.tce_free(tbl, entry, 1); + else + oldtce = 0; + + spin_unlock(&(pool->lock)); + + return oldtce; +} +EXPORT_SYMBOL_GPL(iommu_clear_tce); + +int iommu_clear_tces_and_put_pages(struct iommu_table *tbl, + unsigned long entry, unsigned long pages) +{ + unsigned long oldtce; + struct page *page; + + for ( ; pages; --pages, ++entry) { + oldtce = iommu_clear_tce(tbl, entry); + if (!oldtce) + continue; + + page = 
pfn_to_page(oldtce >> PAGE_SHIFT); + WARN_ON(!page); + if (page) { + if (oldtce & TCE_PCI_WRITE) + SetPageDirty(page); + put_page(page); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages); + +/* + * hwaddr is a kernel virtual address here (0xc... bazillion), + * tce_build converts it to a physical address. + */ +int iommu_tce_build(struct iommu_table *tbl, unsigned long entry, + unsigned long hwaddr, enum dma_data_direction direction) +{ + int ret = -EBUSY; + unsigned long oldtce; + struct iommu_pool *pool = get_pool(tbl, entry); + + spin_lock(&(pool->lock)); + + oldtce = ppc_md.tce_get(tbl, entry); + /* Add new entry if it is not busy */ + if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))) + ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL); + + spin_unlock(&(pool->lock)); + + /* if (unlikely(ret)) + pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n", + __func__, hwaddr, entry << IOMMU_PAGE_SHIFT, + hwaddr, ret); */ + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_tce_build); + +int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry, + unsigned long tce) +{ + int ret; + struct page *page = NULL; + unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK; + enum dma_data_direction direction = iommu_tce_direction(tce); + + ret = get_user_pages_fast(tce & PAGE_MASK, 1, + direction != DMA_TO_DEVICE, &page); + if (unlikely(ret != 1)) { + /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n", + tce, entry << IOMMU_PAGE_SHIFT, ret); */ + return -EFAULT; + } + hwaddr = (unsigned long) page_address(page) + offset; + + ret = iommu_tce_build(tbl, entry, hwaddr, direction); + if (ret) + put_page(page); + + if (ret < 0) + pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n", + __func__, entry << IOMMU_PAGE_SHIFT, tce, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode); + +int iommu_take_ownership(struct iommu_table *tbl) +{ + unsigned long sz = (tbl->it_size + 7) >> 3; + + if (tbl->it_offset == 0) + clear_bit(0, tbl->it_map); + + if (!bitmap_empty(tbl->it_map, tbl->it_size)) { + pr_err("iommu_tce: it_map is not empty"); + return -EBUSY; + } + + memset(tbl->it_map, 0xff, sz); + iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_take_ownership); + +void iommu_release_ownership(struct iommu_table *tbl) +{ + unsigned long sz = (tbl->it_size + 7) >> 3; + + iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); + memset(tbl->it_map, 0, sz); + + /* Restore bit#0 set by iommu_init_table() */ + if (tbl->it_offset == 0) + set_bit(0, tbl->it_map); +} +EXPORT_SYMBOL_GPL(iommu_release_ownership); + +static int iommu_add_device(struct device *dev) +{ + struct iommu_table *tbl; + int ret = 0; + + if (WARN_ON(dev->iommu_group)) { + pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n", + dev_name(dev), + iommu_group_id(dev->iommu_group)); + return -EBUSY; + } + + tbl = get_iommu_table_base(dev); + if (!tbl || !tbl->it_group) { + pr_debug("iommu_tce: skipping device %s with no tbl\n", + dev_name(dev)); + return 0; + } + + pr_debug("iommu_tce: adding %s to iommu group %d\n", + dev_name(dev), iommu_group_id(tbl->it_group)); + + ret = iommu_group_add_device(tbl->it_group, dev); + if (ret < 0) + pr_err("iommu_tce: %s has not been added, ret=%d\n", + dev_name(dev), ret); + + return ret; +} + +static void iommu_del_device(struct device *dev) +{ + iommu_group_remove_device(dev); +} + +static int 
iommu_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + return iommu_add_device(dev); + case BUS_NOTIFY_DEL_DEVICE: + iommu_del_device(dev); + return 0; + default: + return 0; + } +} + +static struct notifier_block tce_iommu_bus_nb = { + .notifier_call = iommu_bus_notifier, +}; + +static int __init tce_iommu_init(void) +{ + struct pci_dev *pdev = NULL; + + BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE); + + for_each_pci_dev(pdev) + iommu_add_device(&pdev->dev); + + bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); + return 0; +} + +subsys_initcall_sync(tce_iommu_init); + +#else + +void iommu_register_group(struct iommu_table *tbl, + int pci_domain_number, unsigned long pe_num) +{ +} + +#endif /* CONFIG_IOMMU_API */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ea185e0b3cae..2e51cde616d2 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -116,8 +116,6 @@ static inline notrace int decrementer_check_overflow(void) u64 now = get_tb_or_rtc(); u64 *next_tb = &__get_cpu_var(decrementers_next_tb); - if (now >= *next_tb) - set_dec(1); return now >= *next_tb; } diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 11f5b03a0b06..2156ea90eb54 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -36,12 +36,6 @@ #include <asm/sstep.h> #include <asm/uaccess.h> -#ifdef CONFIG_PPC_ADV_DEBUG_REGS -#define MSR_SINGLESTEP (MSR_DE) -#else -#define MSR_SINGLESTEP (MSR_SE) -#endif - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -104,19 +98,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p) static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { - /* We turn off async exceptions to ensure that the single step will - * be for the instruction we have the kprobe on, if we dont its - * possible we'd get the single step reported for an exception handler - * like Decrementer or External Interrupt */ - regs->msr &= ~MSR_EE; - regs->msr |= MSR_SINGLESTEP; -#ifdef CONFIG_PPC_ADV_DEBUG_REGS - regs->msr &= ~MSR_CE; - mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); -#ifdef CONFIG_PPC_47x - isync(); -#endif -#endif + enable_single_step(regs); /* * On powerpc we should single step on the original diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 6782221d49bd..db28032e320e 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -750,13 +750,8 @@ EXPORT_SYMBOL_GPL(kvm_hypercall); static __init void kvm_free_tmp(void) { - unsigned long start, end; - - start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK; - end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; - - /* Free the tmp space we don't need */ - free_reserved_area(start, end, 0, NULL); + free_reserved_area(&kvm_tmp[kvm_tmp_index], + &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); } static int __init kvm_guest_init(void) diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 48fbc2b97e95..8213ee1eb05a 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -84,22 +84,30 @@ static ssize_t dev_nvram_read(struct file *file, char __user *buf, char *tmp = NULL; ssize_t size; - ret = -ENODEV; - if (!ppc_md.nvram_size) + if (!ppc_md.nvram_size) { + ret = -ENODEV; goto out; + } - ret = 0; size = ppc_md.nvram_size(); - if (*ppos >= size || size < 0) 
+ if (size < 0) { + ret = size; + goto out; + } + + if (*ppos >= size) { + ret = 0; goto out; + } count = min_t(size_t, count, size - *ppos); count = min(count, PAGE_SIZE); - ret = -ENOMEM; tmp = kmalloc(count, GFP_KERNEL); - if (!tmp) + if (!tmp) { + ret = -ENOMEM; goto out; + } ret = ppc_md.nvram_read(tmp, count, ppos); if (ret <= 0) diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c new file mode 100644 index 000000000000..3f608800c06b --- /dev/null +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -0,0 +1,111 @@ +/* + * Derived from "arch/powerpc/platforms/pseries/pci_dlpar.c" + * + * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com> + * Copyright (C) 2005 International Business Machines + * + * Updates, 2005, John Rose <johnrose@austin.ibm.com> + * Updates, 2005, Linas Vepstas <linas@austin.ibm.com> + * Updates, 2013, Gavin Shan <shangw@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/pci.h> +#include <linux/export.h> +#include <asm/pci-bridge.h> +#include <asm/ppc-pci.h> +#include <asm/firmware.h> +#include <asm/eeh.h> + +/** + * __pcibios_remove_pci_devices - remove all devices under this bus + * @bus: the indicated PCI bus + * @purge_pe: destroy the PE on removal of PCI devices + * + * Remove all of the PCI devices under this bus both from the + * linux pci device tree, and from the powerpc EEH address cache. + * By default, the corresponding PE will be destroied during the + * normal PCI hotplug path. For PCI hotplug during EEH recovery, + * the corresponding PE won't be destroied and deallocated. + */ +void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe) +{ + struct pci_dev *dev, *tmp; + struct pci_bus *child_bus; + + /* First go down child busses */ + list_for_each_entry(child_bus, &bus->children, node) + __pcibios_remove_pci_devices(child_bus, purge_pe); + + pr_debug("PCI: Removing devices on bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); + list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { + pr_debug(" * Removing %s...\n", pci_name(dev)); + eeh_remove_bus_device(dev, purge_pe); + pci_stop_and_remove_bus_device(dev); + } +} + +/** + * pcibios_remove_pci_devices - remove all devices under this bus + * @bus: the indicated PCI bus + * + * Remove all of the PCI devices under this bus both from the + * linux pci device tree, and from the powerpc EEH address cache. + */ +void pcibios_remove_pci_devices(struct pci_bus *bus) +{ + __pcibios_remove_pci_devices(bus, 1); +} +EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); + +/** + * pcibios_add_pci_devices - adds new pci devices to bus + * @bus: the indicated PCI bus + * + * This routine will find and fixup new pci devices under + * the indicated bus. This routine presumes that there + * might already be some devices under this bridge, so + * it carefully tries to add only new devices. (And that + * is how this routine differs from other, similar pcibios + * routines.) 
+ */ +void pcibios_add_pci_devices(struct pci_bus * bus) +{ + int slotno, num, mode, pass, max; + struct pci_dev *dev; + struct device_node *dn = pci_bus_to_OF_node(bus); + + eeh_add_device_tree_early(dn); + + mode = PCI_PROBE_NORMAL; + if (ppc_md.pci_probe_mode) + mode = ppc_md.pci_probe_mode(bus); + + if (mode == PCI_PROBE_DEVTREE) { + /* use ofdt-based probe */ + of_rescan_bus(dn, bus); + } else if (mode == PCI_PROBE_NORMAL) { + /* use legacy probe */ + slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); + num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); + if (!num) + return; + pcibios_setup_bus_devices(bus); + max = bus->busn_res.start; + for (pass = 0; pass < 2; pass++) { + list_for_each_entry(dev, &bus->devices, bus_list) { + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + max = pci_scan_bridge(bus, dev, + max, pass); + } + } + } + pcibios_finish_adding_to_bus(bus); +} +EXPORT_SYMBOL_GPL(pcibios_add_pci_devices); diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 2a67e9baa59f..6b0ba5854d99 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -128,7 +128,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, const char *type; struct pci_slot *slot; - dev = alloc_pci_dev(); + dev = pci_alloc_dev(bus); if (!dev) return NULL; type = of_get_property(node, "device_type", NULL); @@ -137,7 +137,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); - dev->bus = bus; dev->dev.of_node = of_node_get(node); dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; @@ -165,7 +164,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pr_debug(" class: 0x%x\n", dev->class); pr_debug(" revision: 0x%x\n", dev->revision); - dev->current_state = 4; /* unknown power state */ + dev->current_state = PCI_UNKNOWN; /* unknown power state */ dev->error_state = pci_channel_io_normal; dev->dma_mask = 0xffffffff; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 076d1242507a..c517dbe705fd 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -916,7 +916,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) flush_altivec_to_thread(src); flush_vsx_to_thread(src); flush_spe_to_thread(src); + *dst = *src; + + clear_task_ebb(dst); + return 0; } diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 8b6f7a99cce2..eb23ac92abb9 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -559,6 +559,35 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start, } #endif +static void __init early_reserve_mem_dt(void) +{ + unsigned long i, len, dt_root; + const __be32 *prop; + + dt_root = of_get_flat_dt_root(); + + prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len); + + if (!prop) + return; + + DBG("Found new-style reserved-ranges\n"); + + /* Each reserved range is an (address,size) pair, 2 cells each, + * totalling 4 cells per range. 
*/ + for (i = 0; i < len / (sizeof(*prop) * 4); i++) { + u64 base, size; + + base = of_read_number(prop + (i * 4) + 0, 2); + size = of_read_number(prop + (i * 4) + 2, 2); + + if (size) { + DBG("reserving: %llx -> %llx\n", base, size); + memblock_reserve(base, size); + } + } +} + static void __init early_reserve_mem(void) { u64 base, size; @@ -574,12 +603,16 @@ static void __init early_reserve_mem(void) self_size = initial_boot_params->totalsize; memblock_reserve(self_base, self_size); + /* Look for the new "reserved-regions" property in the DT */ + early_reserve_mem_dt(); + #ifdef CONFIG_BLK_DEV_INITRD - /* then reserve the initrd, if any */ - if (initrd_start && (initrd_end > initrd_start)) + /* Then reserve the initrd, if any */ + if (initrd_start && (initrd_end > initrd_start)) { memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), _ALIGN_UP(initrd_end, PAGE_SIZE) - _ALIGN_DOWN(initrd_start, PAGE_SIZE)); + } #endif /* CONFIG_BLK_DEV_INITRD */ #ifdef CONFIG_PPC32 @@ -591,6 +624,8 @@ static void __init early_reserve_mem(void) u32 base_32, size_32; u32 *reserve_map_32 = (u32 *)reserve_map; + DBG("Found old 32-bit reserve map\n"); + while (1) { base_32 = *(reserve_map_32++); size_32 = *(reserve_map_32++); @@ -605,6 +640,9 @@ static void __init early_reserve_mem(void) return; } #endif + DBG("Processing reserve map\n"); + + /* Handle the reserve map in the fdt blob if it exists */ while (1) { base = *(reserve_map++); size = *(reserve_map++); diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 98c2fc198712..9a0d24c390a3 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -975,16 +975,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; hw_brk.len = 8; #ifdef CONFIG_HAVE_HW_BREAKPOINT - if (ptrace_get_breakpoints(task) < 0) - return -ESRCH; - bp = thread->ptrace_bps[0]; if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } - ptrace_put_breakpoints(task); return 0; } if (bp) { @@ -997,11 +993,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ret = modify_user_hw_breakpoint(bp, &attr); if (ret) { - ptrace_put_breakpoints(task); return ret; } thread->ptrace_bps[0] = bp; - ptrace_put_breakpoints(task); thread->hw_brk = hw_brk; return 0; } @@ -1016,12 +1010,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ptrace_triggered, NULL, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; - ptrace_put_breakpoints(task); return PTR_ERR(bp); } - ptrace_put_breakpoints(task); - #endif /* CONFIG_HAVE_HW_BREAKPOINT */ task->thread.hw_brk = hw_brk; #else /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -1440,24 +1431,19 @@ static long ppc_set_hwdebug(struct task_struct *child, if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) brk.type |= HW_BRK_TYPE_WRITE; #ifdef CONFIG_HAVE_HW_BREAKPOINT - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - /* * Check if the request is for 'range' breakpoints. We can * support it if range < 8 bytes. 
*/ - if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) { + if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) len = bp_info->addr2 - bp_info->addr; - } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { - ptrace_put_breakpoints(child); + else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) + len = 1; + else return -EINVAL; - } bp = thread->ptrace_bps[0]; - if (bp) { - ptrace_put_breakpoints(child); + if (bp) return -ENOSPC; - } /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); @@ -1469,11 +1455,9 @@ static long ppc_set_hwdebug(struct task_struct *child, ptrace_triggered, NULL, child); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; - ptrace_put_breakpoints(child); return PTR_ERR(bp); } - ptrace_put_breakpoints(child); return 1; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ @@ -1517,16 +1501,12 @@ static long ppc_del_hwdebug(struct task_struct *child, long data) return -EINVAL; #ifdef CONFIG_HAVE_HW_BREAKPOINT - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - bp = thread->ptrace_bps[0]; if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } else ret = -ENOENT; - ptrace_put_breakpoints(child); return ret; #else /* CONFIG_HAVE_HW_BREAKPOINT */ if (child->thread.hw_brk.address == 0) diff --git a/arch/powerpc/kernel/reloc_32.S b/arch/powerpc/kernel/reloc_32.S index ef46ba6e094f..f366fedb0872 100644 --- a/arch/powerpc/kernel/reloc_32.S +++ b/arch/powerpc/kernel/reloc_32.S @@ -166,7 +166,7 @@ ha16: /* R_PPC_ADDR16_LO */ lo16: cmpwi r4, R_PPC_ADDR16_LO - bne nxtrela + bne unknown_type lwz r4, 0(r9) /* r_offset */ lwz r0, 8(r9) /* r_addend */ add r0, r0, r3 @@ -191,6 +191,7 @@ nxtrela: dcbst r4,r7 sync /* Ensure the data is flushed before icbi */ icbi r4,r7 +unknown_type: cmpwi r8, 0 /* relasz = 0 ? */ ble done add r9, r9, r6 /* move to next entry in the .rela table */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 52add6f3e201..80b5ef403f68 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1172,7 +1172,7 @@ int __init early_init_dt_scan_rtas(unsigned long node, static arch_spinlock_t timebase_lock; static u64 timebase = 0; -void __cpuinit rtas_give_timebase(void) +void rtas_give_timebase(void) { unsigned long flags; @@ -1189,7 +1189,7 @@ void __cpuinit rtas_give_timebase(void) local_irq_restore(flags); } -void __cpuinit rtas_take_timebase(void) +void rtas_take_timebase(void) { while (!timebase) barrier(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 8a022f5de0ef..fe6a58c9f0b7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -76,7 +76,7 @@ #endif int boot_cpuid = 0; -int __initdata spinning_secondaries; +int spinning_secondaries; u64 ppc64_pft_size; /* Pick defaults since we might want to patch instructions diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 201385c3a1ae..0f83122e6676 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_from_user(struct task_struct *task, * altivec/spe instructions at some point. 
*/ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, - int sigret, int ctx_has_vsx_region) + struct mcontext __user *tm_frame, int sigret, + int ctx_has_vsx_region) { unsigned long msr = regs->msr; @@ -475,6 +476,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, if (__put_user(msr, &frame->mc_gregs[PT_MSR])) return 1; + /* We need to write 0 the MSR top 32 bits in the tm frame so that we + * can check it on the restore to see if TM is active + */ + if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR])) + return 1; + if (sigret) { /* Set up the sigreturn trampoline: li r0,sigret; sc */ if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) @@ -747,7 +754,7 @@ static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *tm_sr) { long err; - unsigned long msr; + unsigned long msr, msr_hi; #ifdef CONFIG_VSX int i; #endif @@ -852,8 +859,11 @@ static long restore_tm_user_regs(struct pt_regs *regs, tm_enable(); /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(¤t->thread, msr); - /* The task has moved into TM state S, so ensure MSR reflects this */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S; + /* Get the top half of the MSR */ + if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) + return 1; + /* Pull in MSR TM from user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { @@ -952,6 +962,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, { struct rt_sigframe __user *rt_sf; struct mcontext __user *frame; + struct mcontext __user *tm_frame = NULL; void __user *addr; unsigned long newsp = 0; int sigret; @@ -985,23 +996,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM + tm_frame = &rt_sf->uc_transact.uc_mcontext; if (MSR_TM_ACTIVE(regs->msr)) { - if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext, - &rt_sf->uc_transact.uc_mcontext, sigret)) + if (save_tm_user_regs(regs, frame, tm_frame, sigret)) goto badframe; } else #endif - if (save_user_regs(regs, frame, sigret, 1)) + { + if (save_user_regs(regs, frame, tm_frame, sigret, 1)) goto badframe; + } regs->link = tramp; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (MSR_TM_ACTIVE(regs->msr)) { if (__put_user((unsigned long)&rt_sf->uc_transact, &rt_sf->uc.uc_link) - || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext), - &rt_sf->uc_transact.uc_regs)) + || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs)) goto badframe; } else @@ -1170,7 +1182,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx, mctx = (struct mcontext __user *) ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) - || save_user_regs(regs, mctx, 0, ctx_has_vsx_region) + || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region) || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked) || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) return -EFAULT; @@ -1233,7 +1245,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR])) goto bad; - if (MSR_TM_SUSPENDED(msr_hi<<32)) { + if (MSR_TM_ACTIVE(msr_hi<<32)) { /* We only recheckpoint on return if we're * transaction. 
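Both the restore_tm_user_regs() change and the MSR_TM_ACTIVE(msr_hi<<32) test above depend on the transactional-state (TS) field living in the upper word of the 64-bit MSR while the 32-bit mcontext stores only 32-bit gregs, which is why save_user_regs() pre-zeroes PT_MSR in the tm frame. A small stand-alone sketch of that check follows; the TS bit positions (33 and 34) match the usual ppc64 layout but should be read as assumptions here, not as something this patch defines.

#include <stdint.h>
#include <stdio.h>

/* Assumed ppc64 layout (not defined by this patch): the two
 * transaction-state bits sit at positions 33 and 34 of the 64-bit MSR. */
#define MSR_TS_S        (1ULL << 33)    /* transaction suspended */
#define MSR_TS_T        (1ULL << 34)    /* transaction active */
#define MSR_TS_MASK     (MSR_TS_S | MSR_TS_T)
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0ULL)

int main(void)
{
        /* the 32-bit frame stores only the top word of the MSR ... */
        uint32_t msr_hi_live = (uint32_t)(MSR_TS_S >> 32); /* written inside a transaction */
        uint32_t msr_hi_zero = 0;                          /* tm frame pre-zeroed by save_user_regs() */

        /* ... so it has to be shifted back up before testing the TS field */
        printf("live frame: %s\n",
               MSR_TM_ACTIVE((uint64_t)msr_hi_live << 32) ? "TM active" : "TM inactive");
        printf("zeroed frame: %s\n",
               MSR_TM_ACTIVE((uint64_t)msr_hi_zero << 32) ? "TM active" : "TM inactive");
        return 0;
}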
*/ @@ -1392,6 +1404,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, { struct sigcontext __user *sc; struct sigframe __user *frame; + struct mcontext __user *tm_mctx = NULL; unsigned long newsp = 0; int sigret; unsigned long tramp; @@ -1425,6 +1438,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM + tm_mctx = &frame->mctx_transact; if (MSR_TM_ACTIVE(regs->msr)) { if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, sigret)) @@ -1432,8 +1446,10 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, } else #endif - if (save_user_regs(regs, &frame->mctx, sigret, 1)) + { + if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1)) goto badframe; + } regs->link = tramp; @@ -1481,16 +1497,22 @@ badframe: long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { + struct sigframe __user *sf; struct sigcontext __user *sc; struct sigcontext sigctx; struct mcontext __user *sr; void __user *addr; sigset_t set; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + struct mcontext __user *mcp, *tm_mcp; + unsigned long msr_hi; +#endif /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; - sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); + sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); + sc = &sf->sctx; addr = sc; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; @@ -1507,11 +1529,25 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, #endif set_current_blocked(&set); - sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); - addr = sr; - if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) - || restore_user_regs(regs, sr, 1)) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + mcp = (struct mcontext __user *)&sf->mctx; + tm_mcp = (struct mcontext __user *)&sf->mctx_transact; + if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR])) goto badframe; + if (MSR_TM_ACTIVE(msr_hi<<32)) { + if (!cpu_has_feature(CPU_FTR_TM)) + goto badframe; + if (restore_tm_user_regs(regs, mcp, tm_mcp)) + goto badframe; + } else +#endif + { + sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); + addr = sr; + if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) + || restore_user_regs(regs, sr, 1)) + goto badframe; + } set_thread_flag(TIF_RESTOREALL); return 0; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 345947367ec0..887e99d85bc2 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -410,6 +410,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + /* pull in MSR TM from user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); + + /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* The following non-GPR non-FPR non-VR state is also checkpointed: */ @@ -505,8 +509,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, tm_enable(); /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(¤t->thread, msr); - /* The task has moved into TM state S, so ensure MSR reflects this: */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { @@ -654,7 +656,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, #ifdef 
CONFIG_PPC_TRANSACTIONAL_MEM if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) goto badframe; - if (MSR_TM_SUSPENDED(msr)) { + if (MSR_TM_ACTIVE(msr)) { /* We recheckpoint on return. */ struct ucontext __user *uc_transact; if (__get_user(uc_transact, &uc->uc_link)) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ee7ac5e6e28a..38b0ba65a735 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -480,7 +480,7 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) secondary_ti = current_set[cpu] = ti; } -int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int rc, c; @@ -610,7 +610,7 @@ static struct device_node *cpu_to_l2cache(int cpu) } /* Activate a secondary processor. */ -__cpuinit void start_secondary(void *unused) +void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); struct device_node *l2_cache; @@ -637,12 +637,10 @@ __cpuinit void start_secondary(void *unused) vdso_getcpu_init(); #endif - notify_cpu_starting(cpu); - set_cpu_online(cpu, true); /* Update sibling maps */ base = cpu_first_thread_sibling(cpu); for (i = 0; i < threads_per_core; i++) { - if (cpu_is_offline(base + i)) + if (cpu_is_offline(base + i) && (cpu != base + i)) continue; cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); @@ -667,6 +665,10 @@ __cpuinit void start_secondary(void *unused) } of_node_put(l2_cache); + smp_wmb(); + notify_cpu_starting(cpu); + set_cpu_online(cpu, true); + local_irq_enable(); cpu_startup_entry(CPUHP_ONLINE); diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index e68a84568b8b..27a90b99ef67 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -341,7 +341,7 @@ static struct device_attribute pa6t_attrs[] = { #endif /* HAS_PPC_PMC_PA6T */ #endif /* HAS_PPC_PMC_CLASSIC */ -static void __cpuinit register_cpu_online(unsigned int cpu) +static void register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); struct device *s = &c->dev; @@ -502,7 +502,7 @@ ssize_t arch_cpu_release(const char *buf, size_t count) #endif /* CONFIG_HOTPLUG_CPU */ -static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, +static int sysfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; @@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata sysfs_cpu_nb = { +static struct notifier_block sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 5fc29ad7e26f..65ab9e909377 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -631,7 +631,6 @@ static int __init get_freq(char *name, int cells, unsigned long *val) return found; } -/* should become __cpuinit when secondary_cpu_time_init also is */ void start_cpu_decrementer(void) { #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 2da67e7a16d5..51be8fb24803 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -112,9 +112,18 @@ _GLOBAL(tm_reclaim) std r3, STACK_PARAM(0)(r1) SAVE_NVGPRS(r1) + /* We need to setup MSR for VSX register save instructions. 
Here we + * also clear the MSR RI since when we do the treclaim, we won't have a + * valid kernel pointer for a while. We clear RI here as it avoids + * adding another mtmsr closer to the treclaim. This makes the region + * maked as non-recoverable wider than it needs to be but it saves on + * inserting another mtmsrd later. + */ mfmsr r14 mr r15, r14 ori r15, r15, MSR_FP + li r16, MSR_RI + andc r15, r15, r16 oris r15, r15, MSR_VEC@h #ifdef CONFIG_VSX BEGIN_FTR_SECTION @@ -349,9 +358,10 @@ restore_gprs: mtcr r5 mtxer r6 - /* MSR and flags: We don't change CRs, and we don't need to alter - * MSR. + /* Clear the MSR RI since we are about to change R1. EE is already off */ + li r4, 0 + mtmsrd r4, 1 REST_4GPRS(0, r7) /* GPR0-3 */ REST_GPR(4, r7) /* GPR4-6 */ @@ -377,6 +387,10 @@ restore_gprs: GET_PACA(r13) GET_SCRATCH0(r1) + /* R1 is restored, so we are recoverable again. EE is still off */ + li r4, MSR_RI + mtmsrd r4, 1 + REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index c0e5caf8ccc7..bf33c22e38a4 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -866,6 +866,10 @@ static int emulate_string_inst(struct pt_regs *regs, u32 instword) u8 val; u32 shift = 8 * (3 - (pos & 0x3)); + /* if process is 32-bit, clear upper 32 bits of EA */ + if ((regs->msr & MSR_64BIT) == 0) + EA &= 0xFFFFFFFF; + switch ((instword & PPC_INST_STRING_MASK)) { case PPC_INST_LSWX: case PPC_INST_LSWI: @@ -1125,7 +1129,17 @@ void __kprobes program_check_exception(struct pt_regs *regs) * ESR_DST (!?) or 0. In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a - * pattern to occurrences etc. -dgibson 31/Mar/2003 */ + * pattern to occurrences etc. -dgibson 31/Mar/2003 + */ + + /* + * If we support a HW FPU, we need to ensure the FP state + * if flushed into the thread_struct before attempting + * emulation + */ +#ifdef CONFIG_PPC_FPU + flush_fp_to_thread(current); +#endif switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); @@ -1282,25 +1296,50 @@ void vsx_unavailable_exception(struct pt_regs *regs) die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); } -void tm_unavailable_exception(struct pt_regs *regs) +void facility_unavailable_exception(struct pt_regs *regs) { + static char *facility_strings[] = { + "FPU", + "VMX/VSX", + "DSCR", + "PMU SPRs", + "BHRB", + "TM", + "AT", + "EBB", + "TAR", + }; + char *facility, *prefix; + u64 value; + + if (regs->trap == 0xf60) { + value = mfspr(SPRN_FSCR); + prefix = ""; + } else { + value = mfspr(SPRN_HFSCR); + prefix = "Hypervisor "; + } + + value = value >> 56; + /* We restore the interrupt state now */ if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - /* Currently we never expect a TMU exception. Catch - * this and kill the process! 
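The new facility_unavailable_exception() above extracts the failing facility number from the top byte of FSCR (or HFSCR for hypervisor-controlled facilities) and uses it to index facility_strings[]. The following stand-alone sketch shows just that decode step with an invented register value; the copied string table is used purely for illustration.

#include <stdint.h>
#include <stdio.h>

static const char *facility_strings[] = {
        "FPU", "VMX/VSX", "DSCR", "PMU SPRs", "BHRB", "TM", "AT", "EBB", "TAR",
};

int main(void)
{
        /* invented FSCR image: the facility number occupies bits 56-63 */
        uint64_t fscr = (uint64_t)5 << 56;      /* index 5 is "TM" in the table above */
        uint64_t value = fscr >> 56;
        const char *facility = "unknown";

        if (value < sizeof(facility_strings) / sizeof(facility_strings[0]))
                facility = facility_strings[value];

        printf("Facility '%s' unavailable\n", facility);
        return 0;
}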
- */ - printk(KERN_EMERG "Unexpected TM unavailable exception at %lx " - "(msr %lx)\n", - regs->nip, regs->msr); + if (value < ARRAY_SIZE(facility_strings)) + facility = facility_strings[value]; + else + facility = "unknown"; + + pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", + prefix, facility, regs->nip, regs->msr); if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; } - die("Unexpected TM unavailable exception", regs, SIGABRT); + die("Unexpected facility unavailable exception", regs, SIGABRT); } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1396,8 +1435,7 @@ void performance_monitor_exception(struct pt_regs *regs) void SoftwareEmulation(struct pt_regs *regs) { extern int do_mathemu(struct pt_regs *); - extern int Soft_emulate_8xx(struct pt_regs *); -#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) +#if defined(CONFIG_MATH_EMULATION) int errcode; #endif @@ -1430,23 +1468,6 @@ void SoftwareEmulation(struct pt_regs *regs) _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; } - -#elif defined(CONFIG_8XX_MINIMAL_FPEMU) - errcode = Soft_emulate_8xx(regs); - if (errcode >= 0) - PPC_WARN_EMULATED(8xx, regs); - - switch (errcode) { - case 0: - emulate_single_step(regs); - return; - case 1: - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - return; - case -EFAULT: - _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - return; - } #else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); #endif @@ -1796,8 +1817,6 @@ struct ppc_emulated ppc_emulated = { WARN_EMULATED_SETUP(unaligned), #ifdef CONFIG_MATH_EMULATION WARN_EMULATED_SETUP(math), -#elif defined(CONFIG_8XX_MINIMAL_FPEMU) - WARN_EMULATED_SETUP(8xx), #endif #ifdef CONFIG_VSX WARN_EMULATED_SETUP(vsx), diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 9d3fdcd66290..a15837519dca 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -50,7 +50,7 @@ void __init udbg_early_init(void) udbg_init_debug_beat(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) udbg_init_pas_realmode(); -#elif defined(CONFIG_BOOTX_TEXT) +#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) udbg_init_btext(); #elif defined(CONFIG_PPC_EARLY_DEBUG_44x) /* PPC44x debug */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index d4f463ac65b1..1d9c92621b36 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -711,7 +711,7 @@ static void __init vdso_setup_syscall_map(void) } #ifdef CONFIG_PPC64 -int __cpuinit vdso_getcpu_init(void) +int vdso_getcpu_init(void) { unsigned long cpu, node, val; diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index b350d9494b26..e5240524bf6c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -34,7 +34,7 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, - MMU_PAGE_4K, MMU_SEGSIZE_256M, + MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M, false); } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 7eb5ddab1203..b7a1911d108a 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -675,6 +675,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } /* if the guest wants write access, see if that is OK */ if (!writing && hpte_is_writable(r)) { + unsigned int hugepage_shift; pte_t *ptep, pte; /* @@ -683,9 +684,10 @@ int 
kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, */ rcu_read_lock_sched(); ptep = find_linux_pte_or_hugepte(current->mm->pgd, - hva, NULL); - if (ptep && pte_present(*ptep)) { - pte = kvmppc_read_update_linux_pte(ptep, 1); + hva, &hugepage_shift); + if (ptep) { + pte = kvmppc_read_update_linux_pte(ptep, 1, + hugepage_shift); if (pte_write(pte)) write_ok = 1; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 105b00f24f27..45e30d6e462b 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -27,7 +27,7 @@ static void *real_vmalloc_addr(void *x) unsigned long addr = (unsigned long) x; pte_t *p; - p = find_linux_pte(swapper_pg_dir, addr); + p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL); if (!p || !pte_present(*p)) return NULL; /* assume we don't have huge pages in vmalloc space... */ @@ -139,20 +139,18 @@ static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva, { pte_t *ptep; unsigned long ps = *pte_sizep; - unsigned int shift; + unsigned int hugepage_shift; - ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift); + ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift); if (!ptep) return __pte(0); - if (shift) - *pte_sizep = 1ul << shift; + if (hugepage_shift) + *pte_sizep = 1ul << hugepage_shift; else *pte_sizep = PAGE_SIZE; if (ps > *pte_sizep) return __pte(0); - if (!pte_present(*ptep)) - return __pte(0); - return kvmppc_read_update_linux_pte(ptep, writing); + return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); } static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4e05f8c693b4..f55e14cd1762 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -418,6 +418,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) return kvmppc_core_create_memslot(slot, npages); } +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index e15c521846ca..99c7fc16dc0d 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -580,7 +580,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) - regs->nip = imm; + regs->nip = truncate_if_32bit(regs->msr, imm); return 1; #ifdef CONFIG_PPC64 case 17: /* sc */ diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 7d1dba0d57f9..8d035d2d42a6 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -4,7 +4,8 @@ obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \ fmadd.o fmadds.o fmsub.o fmsubs.o \ fmul.o fmuls.o fnabs.o fneg.o \ fnmadd.o fnmadds.o fnmsub.o fnmsubs.o \ - fres.o frsp.o frsqrte.o fsel.o lfs.o \ + fres.o fre.o frsp.o fsel.o lfs.o \ + frsqrte.o frsqrtes.o \ fsqrt.o fsqrts.o fsub.o fsubs.o \ mcrfs.o mffs.o mtfsb0.o mtfsb1.o \ mtfsf.o mtfsfi.o stfiwx.o stfs.o \ diff --git a/arch/powerpc/math-emu/fre.c b/arch/powerpc/math-emu/fre.c new file mode 100644 index 000000000000..49ccf2cc6a5a --- /dev/null +++ b/arch/powerpc/math-emu/fre.c @@ -0,0 +1,11 @@ +#include <linux/types.h> +#include <linux/errno.h> +#include <asm/uaccess.h> + +int fre(void *frD, void *frB) +{ +#ifdef DEBUG + printk("%s: %p %p\n", __func__, 
frD, frB); +#endif + return -ENOSYS; +} diff --git a/arch/powerpc/math-emu/frsqrtes.c b/arch/powerpc/math-emu/frsqrtes.c new file mode 100644 index 000000000000..7e838e380314 --- /dev/null +++ b/arch/powerpc/math-emu/frsqrtes.c @@ -0,0 +1,11 @@ +#include <linux/types.h> +#include <linux/errno.h> +#include <asm/uaccess.h> + +int frsqrtes(void *frD, void *frB) +{ +#ifdef DEBUG + printk("%s: %p %p\n", __func__, frD, frB); +#endif + return 0; +} diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c index 164d55935bd8..0328e66e0799 100644 --- a/arch/powerpc/math-emu/math.c +++ b/arch/powerpc/math-emu/math.c @@ -58,8 +58,10 @@ FLOATFUNC(fnabs); FLOATFUNC(fneg); /* Optional */ +FLOATFUNC(fre); FLOATFUNC(fres); FLOATFUNC(frsqrte); +FLOATFUNC(frsqrtes); FLOATFUNC(fsel); FLOATFUNC(fsqrt); FLOATFUNC(fsqrts); @@ -97,6 +99,7 @@ FLOATFUNC(fsqrts); #define FSQRTS 0x016 /* 22 */ #define FRES 0x018 /* 24 */ #define FMULS 0x019 /* 25 */ +#define FRSQRTES 0x01a /* 26 */ #define FMSUBS 0x01c /* 28 */ #define FMADDS 0x01d /* 29 */ #define FNMSUBS 0x01e /* 30 */ @@ -109,6 +112,7 @@ FLOATFUNC(fsqrts); #define FADD 0x015 /* 21 */ #define FSQRT 0x016 /* 22 */ #define FSEL 0x017 /* 23 */ +#define FRE 0x018 /* 24 */ #define FMUL 0x019 /* 25 */ #define FRSQRTE 0x01a /* 26 */ #define FMSUB 0x01c /* 28 */ @@ -299,9 +303,10 @@ do_mathemu(struct pt_regs *regs) case FDIVS: func = fdivs; type = AB; break; case FSUBS: func = fsubs; type = AB; break; case FADDS: func = fadds; type = AB; break; - case FSQRTS: func = fsqrts; type = AB; break; - case FRES: func = fres; type = AB; break; + case FSQRTS: func = fsqrts; type = XB; break; + case FRES: func = fres; type = XB; break; case FMULS: func = fmuls; type = AC; break; + case FRSQRTES: func = frsqrtes;type = XB; break; case FMSUBS: func = fmsubs; type = ABC; break; case FMADDS: func = fmadds; type = ABC; break; case FNMSUBS: func = fnmsubs; type = ABC; break; @@ -317,10 +322,11 @@ do_mathemu(struct pt_regs *regs) case FDIV: func = fdiv; type = AB; break; case FSUB: func = fsub; type = AB; break; case FADD: func = fadd; type = AB; break; - case FSQRT: func = fsqrt; type = AB; break; + case FSQRT: func = fsqrt; type = XB; break; + case FRE: func = fre; type = XB; break; case FSEL: func = fsel; type = ABC; break; case FMUL: func = fmul; type = AC; break; - case FRSQRTE: func = frsqrte; type = AB; break; + case FRSQRTE: func = frsqrte; type = XB; break; case FMSUB: func = fmsub; type = ABC; break; case FMADD: func = fmadd; type = ABC; break; case FNMSUB: func = fnmsub; type = ABC; break; diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index 2c9441ee6bb8..82b1ff759e26 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -41,7 +41,7 @@ int icache_44x_need_flush; unsigned long tlb_47x_boltmap[1024/8]; -static void __cpuinit ppc44x_update_tlb_hwater(void) +static void ppc44x_update_tlb_hwater(void) { extern unsigned int tlb_44x_patch_hwater_D[]; extern unsigned int tlb_44x_patch_hwater_I[]; @@ -134,7 +134,7 @@ static void __init ppc47x_update_boltmap(void) /* * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU */ -static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys) +static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys) { unsigned int rA; int bolted; @@ -229,7 +229,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, } #ifdef CONFIG_SMP -void __cpuinit mmu_init_secondary(int cpu) +void mmu_init_secondary(int cpu) { unsigned long addr; unsigned long memstart = 
memstart_addr & ~(PPC_PIN_SIZE - 1); diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index cf16b5733eaa..51230ee6a407 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -6,17 +6,16 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) -obj-y := fault.o mem.o pgtable.o gup.o \ +obj-y := fault.o mem.o pgtable.o gup.o mmap.o \ init_$(CONFIG_WORD_SIZE).o \ pgtable_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ tlb_nohash_low.o obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(CONFIG_WORD_SIZE)e.o -obj-$(CONFIG_PPC64) += mmap_64.o hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o \ slb_low.o slb.o stab.o \ - mmap_64.o $(hash64-y) + $(hash64-y) obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ tlb_hash$(CONFIG_WORD_SIZE).o \ @@ -28,11 +27,12 @@ obj-$(CONFIG_44x) += 44x_mmu.o obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o obj-$(CONFIG_PPC_MM_SLICES) += slice.o -ifeq ($(CONFIG_HUGETLB_PAGE),y) obj-y += hugetlbpage.o +ifeq ($(CONFIG_HUGETLB_PAGE),y) obj-$(CONFIG_PPC_STD_MMU_64) += hugetlbpage-hash64.o obj-$(CONFIG_PPC_BOOK3E_MMU) += hugetlbpage-book3e.o endif +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c index 4b921affa495..49822d90ea96 100644 --- a/arch/powerpc/mm/gup.c +++ b/arch/powerpc/mm/gup.c @@ -34,7 +34,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, ptep = pte_offset_kernel(&pmd, addr); do { - pte_t pte = *ptep; + pte_t pte = ACCESS_ONCE(*ptep); struct page *page; if ((pte_val(pte) & mask) != result) @@ -63,12 +63,18 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, pmdp = pmd_offset(&pud, addr); do { - pmd_t pmd = *pmdp; + pmd_t pmd = ACCESS_ONCE(*pmdp); next = pmd_addr_end(addr, end); - if (pmd_none(pmd)) + /* + * If we find a splitting transparent hugepage we + * return zero. 
That will result in taking the slow + * path which will call wait_split_huge_page() + * if the pmd is still in splitting state + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; - if (pmd_huge(pmd)) { + if (pmd_huge(pmd) || pmd_large(pmd)) { if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next, write, pages, nr)) return 0; @@ -91,7 +97,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, pudp = pud_offset(&pgd, addr); do { - pud_t pud = *pudp; + pud_t pud = ACCESS_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) @@ -154,7 +160,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, pgdp = pgd_offset(mm, addr); do { - pgd_t pgd = *pgdp; + pgd_t pgd = ACCESS_ONCE(*pgdp); pr_devel(" %016lx: normal pgd %p\n", addr, (void *)pgd_val(pgd)); diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 0e980acae67c..d3cbda62857b 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -289,9 +289,10 @@ htab_modify_pte: /* Call ppc_md.hpte_updatepp */ mr r5,r29 /* vpn */ - li r6,MMU_PAGE_4K /* page size */ - ld r7,STK_PARAM(R9)(r1) /* segment size */ - ld r8,STK_PARAM(R8)(r1) /* get "local" param */ + li r6,MMU_PAGE_4K /* base page size */ + li r7,MMU_PAGE_4K /* actual page size */ + ld r8,STK_PARAM(R9)(r1) /* segment size */ + ld r9,STK_PARAM(R8)(r1) /* get "local" param */ _GLOBAL(htab_call_hpte_updatepp) bl . /* Patched by htab_finish_init() */ @@ -649,9 +650,10 @@ htab_modify_pte: /* Call ppc_md.hpte_updatepp */ mr r5,r29 /* vpn */ - li r6,MMU_PAGE_4K /* page size */ - ld r7,STK_PARAM(R9)(r1) /* segment size */ - ld r8,STK_PARAM(R8)(r1) /* get "local" param */ + li r6,MMU_PAGE_4K /* base page size */ + li r7,MMU_PAGE_4K /* actual page size */ + ld r8,STK_PARAM(R9)(r1) /* segment size */ + ld r9,STK_PARAM(R8)(r1) /* get "local" param */ _GLOBAL(htab_call_hpte_updatepp) bl . /* patched by htab_finish_init() */ @@ -937,9 +939,10 @@ ht64_modify_pte: /* Call ppc_md.hpte_updatepp */ mr r5,r29 /* vpn */ - li r6,MMU_PAGE_64K - ld r7,STK_PARAM(R9)(r1) /* segment size */ - ld r8,STK_PARAM(R8)(r1) /* get "local" param */ + li r6,MMU_PAGE_64K /* base page size */ + li r7,MMU_PAGE_64K /* actual page size */ + ld r8,STK_PARAM(R9)(r1) /* segment size */ + ld r9,STK_PARAM(R8)(r1) /* get "local" param */ _GLOBAL(ht64_call_hpte_updatepp) bl . /* patched by htab_finish_init() */ diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 4c122c3f1623..3f0c30ae4791 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -273,61 +273,15 @@ static long native_hpte_remove(unsigned long hpte_group) return i; } -static inline int __hpte_actual_psize(unsigned int lp, int psize) -{ - int i, shift; - unsigned int mask; - - /* start from 1 ignoring MMU_PAGE_4K */ - for (i = 1; i < MMU_PAGE_COUNT; i++) { - - /* invalid penc */ - if (mmu_psize_defs[psize].penc[i] == -1) - continue; - /* - * encoding bits per actual page size - * PTE LP actual page size - * rrrr rrrz >=8KB - * rrrr rrzz >=16KB - * rrrr rzzz >=32KB - * rrrr zzzz >=64KB - * ....... 
- */ - shift = mmu_psize_defs[i].shift - LP_SHIFT; - if (shift > LP_BITS) - shift = LP_BITS; - mask = (1 << shift) - 1; - if ((lp & mask) == mmu_psize_defs[psize].penc[i]) - return i; - } - return -1; -} - -static inline int hpte_actual_psize(struct hash_pte *hptep, int psize) -{ - /* Look at the 8 bit LP value */ - unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1); - - if (!(hptep->v & HPTE_V_VALID)) - return -1; - - /* First check if it is large page */ - if (!(hptep->v & HPTE_V_LARGE)) - return MMU_PAGE_4K; - - return __hpte_actual_psize(lp, psize); -} - static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long vpn, int psize, int ssize, - int local) + unsigned long vpn, int bpsize, + int apsize, int ssize, int local) { struct hash_pte *hptep = htab_address + slot; unsigned long hpte_v, want_v; int ret = 0; - int actual_psize; - want_v = hpte_encode_avpn(vpn, psize, ssize); + want_v = hpte_encode_avpn(vpn, bpsize, ssize); DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", vpn, want_v & HPTE_V_AVPN, slot, newpp); @@ -335,7 +289,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, native_lock_hpte(hptep); hpte_v = hptep->v; - actual_psize = hpte_actual_psize(hptep, psize); /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less @@ -343,12 +296,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, * (hpte_remove) because we assume the old translation is still * technically "valid". */ - if (actual_psize < 0) { - actual_psize = psize; - ret = -1; - goto err_out; - } - if (!HPTE_V_COMPARE(hpte_v, want_v)) { + if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) { DBG_LOW(" -> miss\n"); ret = -1; } else { @@ -357,11 +305,10 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); } -err_out: native_unlock_hpte(hptep); /* Ensure it is out of the tlb too. */ - tlbie(vpn, psize, actual_psize, ssize, local); + tlbie(vpn, bpsize, apsize, ssize, local); return ret; } @@ -402,7 +349,6 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize) static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { - int actual_psize; unsigned long vpn; unsigned long vsid; long slot; @@ -415,36 +361,33 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, if (slot == -1) panic("could not find page to bolt\n"); hptep = htab_address + slot; - actual_psize = hpte_actual_psize(hptep, psize); - if (actual_psize < 0) - actual_psize = psize; /* Update the HPTE */ hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | (newpp & (HPTE_R_PP | HPTE_R_N)); - - /* Ensure it is out of the tlb too. */ - tlbie(vpn, psize, actual_psize, ssize, 0); + /* + * Ensure it is out of the tlb too. Bolted entries base and + * actual page size will be same. 
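The reworked native_hpte_updatepp() and native_hpte_updateboltedpp() above no longer guess the actual page size from the HPTE contents; callers now pass a base page size and an actual page size explicitly, and the two only differ for hugepages. A tiny sketch of how such a pair of arguments pans out; tlbie_stub() and the size constants are placeholders for this illustration, not the kernel's interfaces.

#include <stdio.h>

/* Stand-ins for the kernel's MMU page-size indexes; the values are illustrative. */
enum { PSIZE_4K, PSIZE_64K, PSIZE_16M };

/* Placeholder for tlbie()/ppc_md.hpte_invalidate(): only the pairing of the
 * two size arguments matters for this sketch. */
static void tlbie_stub(int bpsize, int apsize)
{
        printf("tlbie: base psize %d, actual psize %d\n", bpsize, apsize);
}

int main(void)
{
        /* ordinary or bolted mapping: base and actual page size are the same */
        tlbie_stub(PSIZE_4K, PSIZE_4K);

        /* transparent hugepage: tracked in base-page-size HPTE slots (64K here)
         * while the translation actually covers a 16M page */
        tlbie_stub(PSIZE_64K, PSIZE_16M);
        return 0;
}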
+ */ + tlbie(vpn, psize, psize, ssize, 0); } static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, - int psize, int ssize, int local) + int bpsize, int apsize, int ssize, int local) { struct hash_pte *hptep = htab_address + slot; unsigned long hpte_v; unsigned long want_v; unsigned long flags; - int actual_psize; local_irq_save(flags); DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); - want_v = hpte_encode_avpn(vpn, psize, ssize); + want_v = hpte_encode_avpn(vpn, bpsize, ssize); native_lock_hpte(hptep); hpte_v = hptep->v; - actual_psize = hpte_actual_psize(hptep, psize); /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less @@ -452,23 +395,120 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, * (hpte_remove) because we assume the old translation is still * technically "valid". */ - if (actual_psize < 0) { - actual_psize = psize; - native_unlock_hpte(hptep); - goto err_out; - } - if (!HPTE_V_COMPARE(hpte_v, want_v)) + if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) native_unlock_hpte(hptep); else /* Invalidate the hpte. NOTE: this also unlocks it */ hptep->v = 0; -err_out: /* Invalidate the TLB */ - tlbie(vpn, psize, actual_psize, ssize, local); + tlbie(vpn, bpsize, apsize, ssize, local); + + local_irq_restore(flags); +} + +static void native_hugepage_invalidate(struct mm_struct *mm, + unsigned char *hpte_slot_array, + unsigned long addr, int psize) +{ + int ssize = 0, i; + int lock_tlbie; + struct hash_pte *hptep; + int actual_psize = MMU_PAGE_16M; + unsigned int max_hpte_count, valid; + unsigned long flags, s_addr = addr; + unsigned long hpte_v, want_v, shift; + unsigned long hidx, vpn = 0, vsid, hash, slot; + + shift = mmu_psize_defs[psize].shift; + max_hpte_count = 1U << (PMD_SHIFT - shift); + + local_irq_save(flags); + for (i = 0; i < max_hpte_count; i++) { + valid = hpte_valid(hpte_slot_array, i); + if (!valid) + continue; + hidx = hpte_hash_index(hpte_slot_array, i); + + /* get the vpn */ + addr = s_addr + (i * (1ul << shift)); + if (!is_kernel_addr(addr)) { + ssize = user_segment_size(addr); + vsid = get_vsid(mm->context.id, addr, ssize); + WARN_ON(vsid == 0); + } else { + vsid = get_kernel_vsid(addr, mmu_kernel_ssize); + ssize = mmu_kernel_ssize; + } + + vpn = hpt_vpn(addr, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + + hptep = htab_address + slot; + want_v = hpte_encode_avpn(vpn, psize, ssize); + native_lock_hpte(hptep); + hpte_v = hptep->v; + + /* Even if we miss, we need to invalidate the TLB */ + if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) + native_unlock_hpte(hptep); + else + /* Invalidate the hpte. NOTE: this also unlocks it */ + hptep->v = 0; + } + /* + * Since this is a hugepage, we just need a single tlbie. + * use the last vpn. 
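The native_hugepage_invalidate() loop above visits one HPTE slot per base page that backs the hugepage: max_hpte_count is 1 << (PMD_SHIFT - shift), for example 1 << (24 - 16) = 256 slots when a 16M hugepage is backed by 64K base pages (those shift values reflect the usual 64K-page configuration and are assumptions here). Below is a short sketch of the per-index slot arithmetic with placeholder constants.

#include <stdio.h>

/* Placeholder values, chosen only to make the arithmetic concrete. */
#define HPTES_PER_GROUP         8
#define PTEIDX_SECONDARY        0x8
#define PTEIDX_GROUP_IX         0x7

static unsigned long slot_for_index(unsigned long hash, unsigned int hidx,
                                    unsigned long htab_hash_mask)
{
        /* entries hashed into the secondary group live under the inverted hash */
        if (hidx & PTEIDX_SECONDARY)
                hash = ~hash;
        return (hash & htab_hash_mask) * HPTES_PER_GROUP +
               (hidx & PTEIDX_GROUP_IX);
}

int main(void)
{
        unsigned int pmd_shift = 24, page_shift = 16;   /* 16M hugepage, 64K base pages */
        unsigned int max_hpte_count = 1U << (pmd_shift - page_shift);

        printf("HPTE slots to scan: %u\n", max_hpte_count);             /* 256 */
        printf("example slot: %lu\n", slot_for_index(0x1234, 0x9, 0xffff));
        return 0;
}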
+ */ + lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); + if (lock_tlbie) + raw_spin_lock(&native_tlbie_lock); + + asm volatile("ptesync":::"memory"); + __tlbie(vpn, psize, actual_psize, ssize); + asm volatile("eieio; tlbsync; ptesync":::"memory"); + + if (lock_tlbie) + raw_spin_unlock(&native_tlbie_lock); + local_irq_restore(flags); } +static inline int __hpte_actual_psize(unsigned int lp, int psize) +{ + int i, shift; + unsigned int mask; + + /* start from 1 ignoring MMU_PAGE_4K */ + for (i = 1; i < MMU_PAGE_COUNT; i++) { + + /* invalid penc */ + if (mmu_psize_defs[psize].penc[i] == -1) + continue; + /* + * encoding bits per actual page size + * PTE LP actual page size + * rrrr rrrz >=8KB + * rrrr rrzz >=16KB + * rrrr rzzz >=32KB + * rrrr zzzz >=64KB + * ....... + */ + shift = mmu_psize_defs[i].shift - LP_SHIFT; + if (shift > LP_BITS) + shift = LP_BITS; + mask = (1 << shift) - 1; + if ((lp & mask) == mmu_psize_defs[psize].penc[i]) + return i; + } + return -1; +} + static void hpte_decode(struct hash_pte *hpte, unsigned long slot, int *psize, int *apsize, int *ssize, unsigned long *vpn) { @@ -672,4 +712,5 @@ void __init hpte_init_native(void) ppc_md.hpte_remove = native_hpte_remove; ppc_md.hpte_clear_all = native_hpte_clear; ppc_md.flush_hash_range = native_flush_hash_range; + ppc_md.hugepage_invalidate = native_hugepage_invalidate; } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index e303a6d74e3a..6ecc38bd5b24 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -807,7 +807,7 @@ void __init early_init_mmu(void) } #ifdef CONFIG_SMP -void __cpuinit early_init_mmu_secondary(void) +void early_init_mmu_secondary(void) { /* Initialize hash table for that CPU */ if (!firmware_has_feature(FW_FEATURE_LPAR)) @@ -1050,13 +1050,26 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) goto bail; } -#ifdef CONFIG_HUGETLB_PAGE if (hugeshift) { - rc = __hash_page_huge(ea, access, vsid, ptep, trap, local, - ssize, hugeshift, psize); + if (pmd_trans_huge(*(pmd_t *)ptep)) + rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, + trap, local, ssize, psize); +#ifdef CONFIG_HUGETLB_PAGE + else + rc = __hash_page_huge(ea, access, vsid, ptep, trap, + local, ssize, hugeshift, psize); +#else + else { + /* + * if we have hugeshift, and is not transhuge with + * hugetlb disabled, something is really wrong. + */ + rc = 1; + WARN_ON(1); + } +#endif goto bail; } -#endif /* CONFIG_HUGETLB_PAGE */ #ifndef CONFIG_PPC_64K_PAGES DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); @@ -1145,6 +1158,7 @@ EXPORT_SYMBOL_GPL(hash_page); void hash_preload(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap) { + int hugepage_shift; unsigned long vsid; pgd_t *pgdir; pte_t *ptep; @@ -1166,10 +1180,27 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, pgdir = mm->pgd; if (pgdir == NULL) return; - ptep = find_linux_pte(pgdir, ea); - if (!ptep) + + /* Get VSID */ + ssize = user_segment_size(ea); + vsid = get_vsid(mm->context.id, ea, ssize); + if (!vsid) return; + /* + * Hash doesn't like irqs. Walking linux page table with irq disabled + * saves us from holding multiple locks. + */ + local_irq_save(flags); + + /* + * THP pages use update_mmu_cache_pmd. We don't do + * hash preload there. 
Hence can ignore THP here + */ + ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift); + if (!ptep) + goto out_exit; + WARN_ON(hugepage_shift); #ifdef CONFIG_PPC_64K_PAGES /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on * a 64K kernel), then we don't preload, hash_page() will take @@ -1178,18 +1209,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, * page size demotion here */ if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE)) - return; + goto out_exit; #endif /* CONFIG_PPC_64K_PAGES */ - /* Get VSID */ - ssize = user_segment_size(ea); - vsid = get_vsid(mm->context.id, ea, ssize); - if (!vsid) - return; - - /* Hash doesn't like irqs */ - local_irq_save(flags); - /* Is that local to this CPU ? */ if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) local = 1; @@ -1211,7 +1233,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, mm->context.user_psize, mm->context.user_psize, pte_val(*ptep)); - +out_exit: local_irq_restore(flags); } @@ -1232,7 +1254,11 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); - ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local); + /* + * We use same base page size and actual psize, because we don't + * use these functions for hugepage + */ + ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local); } pte_iterate_hashed_end(); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1365,7 +1391,8 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; - ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0); + ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize, + mmu_kernel_ssize, 0); } void kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c new file mode 100644 index 000000000000..34de9e0cdc34 --- /dev/null +++ b/arch/powerpc/mm/hugepage-hash64.c @@ -0,0 +1,175 @@ +/* + * Copyright IBM Corporation, 2013 + * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ + +/* + * PPC64 THP Support for hash based MMUs + */ +#include <linux/mm.h> +#include <asm/machdep.h> + +int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, + pmd_t *pmdp, unsigned long trap, int local, int ssize, + unsigned int psize) +{ + unsigned int index, valid; + unsigned char *hpte_slot_array; + unsigned long rflags, pa, hidx; + unsigned long old_pmd, new_pmd; + int ret, lpsize = MMU_PAGE_16M; + unsigned long vpn, hash, shift, slot; + + /* + * atomically mark the linux large page PMD busy and dirty + */ + do { + old_pmd = pmd_val(*pmdp); + /* If PMD busy, retry the access */ + if (unlikely(old_pmd & _PAGE_BUSY)) + return 0; + /* If PMD is trans splitting retry the access */ + if (unlikely(old_pmd & _PAGE_SPLITTING)) + return 0; + /* If PMD permissions don't match, take page fault */ + if (unlikely(access & ~old_pmd)) + return 1; + /* + * Try to lock the PTE, add ACCESSED and DIRTY if it was + * a write access + */ + new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED; + if (access & _PAGE_RW) + new_pmd |= _PAGE_DIRTY; + } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp, + old_pmd, new_pmd)); + /* + * PP bits. _PAGE_USER is already PP bit 0x2, so we only + * need to add in 0x1 if it's a read-only user page + */ + rflags = new_pmd & _PAGE_USER; + if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) && + (new_pmd & _PAGE_DIRTY))) + rflags |= 0x1; + /* + * _PAGE_EXEC -> HW_NO_EXEC since it's inverted + */ + rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N); + +#if 0 + if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { + + /* + * No CPU has hugepages but lacks no execute, so we + * don't need to worry about that case + */ + rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); + } +#endif + /* + * Find the slot index details for this ea, using base page size. + */ + shift = mmu_psize_defs[psize].shift; + index = (ea & ~HPAGE_PMD_MASK) >> shift; + BUG_ON(index >= 4096); + + vpn = hpt_vpn(ea, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + hpte_slot_array = get_hpte_slot_array(pmdp); + + valid = hpte_valid(hpte_slot_array, index); + if (valid) { + /* update the hpte bits */ + hidx = hpte_hash_index(hpte_slot_array, index); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + + ret = ppc_md.hpte_updatepp(slot, rflags, vpn, + psize, lpsize, ssize, local); + /* + * We failed to update, try to insert a new entry. + */ + if (ret == -1) { + /* + * large pte is marked busy, so we can be sure + * nobody is looking at hpte_slot_array. hence we can + * safely update this here. 
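The do/while loop at the top of __hash_page_thp() above is a standard lock-free read-modify-write: snapshot the PMD, bail out if it is already busy or being split, otherwise try to install a copy with the busy/accessed/dirty bits set, and retry if another CPU changed the entry in the meantime. A self-contained C11 sketch of that shape follows; the flag bits are invented and atomic_compare_exchange_strong() merely stands in for the kernel's __cmpxchg_u64().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Invented flag bits, standing in for _PAGE_BUSY/_PAGE_ACCESSED/_PAGE_DIRTY. */
#define F_BUSY          (1UL << 0)
#define F_ACCESSED      (1UL << 1)
#define F_DIRTY         (1UL << 2)

/* Returns false if the entry was busy (caller retries the whole access),
 * true once the busy bit has been claimed atomically. */
static bool claim_entry(_Atomic uint64_t *entry, bool write_access)
{
        uint64_t old, new;

        do {
                old = atomic_load(entry);
                if (old & F_BUSY)       /* another CPU owns it: back off */
                        return false;
                new = old | F_BUSY | F_ACCESSED;
                if (write_access)
                        new |= F_DIRTY;
                /* a failed exchange means the entry changed; re-read and retry */
        } while (!atomic_compare_exchange_strong(entry, &old, new));

        return true;
}

int main(void)
{
        _Atomic uint64_t pmd = F_ACCESSED;      /* an idle entry */

        return claim_entry(&pmd, true) ? 0 : 1;
}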
+ */ + valid = 0; + new_pmd &= ~_PAGE_HPTEFLAGS; + hpte_slot_array[index] = 0; + } else + /* clear the busy bits and set the hash pte bits */ + new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; + } + + if (!valid) { + unsigned long hpte_group; + + /* insert new entry */ + pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; +repeat: + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + + /* clear the busy bits and set the hash pte bits */ + new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; + + /* Add in WIMG bits */ + rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | + _PAGE_COHERENT | _PAGE_GUARDED)); + + /* Insert into the hash table, primary slot */ + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, + psize, lpsize, ssize); + /* + * Primary is full, try the secondary + */ + if (unlikely(slot == -1)) { + hpte_group = ((~hash & htab_hash_mask) * + HPTES_PER_GROUP) & ~0x7UL; + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, + rflags, HPTE_V_SECONDARY, + psize, lpsize, ssize); + if (slot == -1) { + if (mftb() & 0x1) + hpte_group = ((hash & htab_hash_mask) * + HPTES_PER_GROUP) & ~0x7UL; + + ppc_md.hpte_remove(hpte_group); + goto repeat; + } + } + /* + * Hypervisor failure. Restore old pmd and return -1 + * similar to __hash_page_* + */ + if (unlikely(slot == -2)) { + *pmdp = __pmd(old_pmd); + hash_failure_debug(ea, access, vsid, trap, ssize, + psize, lpsize, old_pmd); + return -1; + } + /* + * large pte is marked busy, so we can be sure + * nobody is looking at hpte_slot_array. hence we can + * safely update this here. + */ + mark_hpte_slot_valid(hpte_slot_array, index, slot); + } + /* + * No need to use ldarx/stdcx here + */ + *pmdp = __pmd(new_pmd & ~_PAGE_BUSY); + return 0; +} diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index 0f1d94a1fb82..0b7fb6761015 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -81,7 +81,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, slot += (old_pte & _PAGE_F_GIX) >> 12; if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize, - ssize, local) == -1) + mmu_psize, ssize, local) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 77fdd2cef33b..834ca8eb38f2 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -21,6 +21,9 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/setup.h> +#include <asm/hugetlb.h> + +#ifdef CONFIG_HUGETLB_PAGE #define PAGE_SHIFT_64K 16 #define PAGE_SHIFT_16M 24 @@ -100,68 +103,9 @@ int pgd_huge(pgd_t pgd) } #endif -/* - * We have 4 cases for pgds and pmds: - * (1) invalid (all zeroes) - * (2) pointer to next table, as normal; bottom 6 bits == 0 - * (3) leaf pte for huge page, bottom two bits != 00 - * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table - */ -pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift) -{ - pgd_t *pg; - pud_t *pu; - pmd_t *pm; - pte_t *ret_pte; - hugepd_t *hpdp = NULL; - unsigned pdshift = PGDIR_SHIFT; - - if (shift) - *shift = 0; - - pg = pgdir + pgd_index(ea); - - if (pgd_huge(*pg)) { - ret_pte = (pte_t *) pg; - goto out; - } else if (is_hugepd(pg)) - hpdp = (hugepd_t *)pg; - else if (!pgd_none(*pg)) { - pdshift = PUD_SHIFT; - pu = pud_offset(pg, ea); - - if (pud_huge(*pu)) { - ret_pte = (pte_t *) pu; - goto out; - } else if (is_hugepd(pu)) - hpdp = (hugepd_t *)pu; - else if (!pud_none(*pu)) { - 
pdshift = PMD_SHIFT; - pm = pmd_offset(pu, ea); - - if (pmd_huge(*pm)) { - ret_pte = (pte_t *) pm; - goto out; - } else if (is_hugepd(pm)) - hpdp = (hugepd_t *)pm; - else if (!pmd_none(*pm)) - return pte_offset_kernel(pm, ea); - } - } - if (!hpdp) - return NULL; - - ret_pte = hugepte_offset(hpdp, ea, pdshift); - pdshift = hugepd_shift(*hpdp); -out: - if (shift) - *shift = pdshift; - return ret_pte; -} -EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte); - pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { + /* Only called for hugetlbfs pages, hence can ignore THP */ return find_linux_pte_or_hugepte(mm->pgd, addr, NULL); } @@ -357,7 +301,7 @@ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) int alloc_bootmem_huge_page(struct hstate *hstate) { struct huge_bootmem_page *m; - int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT); + int idx = shift_to_mmu_psize(huge_page_shift(hstate)); int nr_gpages = gpage_freearray[idx].nr_gpages; if (nr_gpages == 0) @@ -736,11 +680,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) struct page *page; unsigned shift; unsigned long mask; - + /* + * Transparent hugepages are handled by generic code. We can skip them + * here. + */ ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); /* Verify it is a huge page else bail. */ - if (!ptep || !shift) + if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) return ERR_PTR(-EINVAL); mask = (1UL << shift) - 1; @@ -759,69 +706,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, return NULL; } -int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, - unsigned long end, int write, struct page **pages, int *nr) -{ - unsigned long mask; - unsigned long pte_end; - struct page *head, *page, *tail; - pte_t pte; - int refs; - - pte_end = (addr + sz) & ~(sz-1); - if (pte_end < end) - end = pte_end; - - pte = *ptep; - mask = _PAGE_PRESENT | _PAGE_USER; - if (write) - mask |= _PAGE_RW; - - if ((pte_val(pte) & mask) != mask) - return 0; - - /* hugepages are never "special" */ - VM_BUG_ON(!pfn_valid(pte_pfn(pte))); - - refs = 0; - head = pte_page(pte); - - page = head + ((addr & (sz-1)) >> PAGE_SHIFT); - tail = page; - do { - VM_BUG_ON(compound_head(page) != head); - pages[*nr] = page; - (*nr)++; - page++; - refs++; - } while (addr += PAGE_SIZE, addr != end); - - if (!page_cache_add_speculative(head, refs)) { - *nr -= refs; - return 0; - } - - if (unlikely(pte_val(pte) != pte_val(*ptep))) { - /* Could be optimized better */ - *nr -= refs; - while (refs--) - put_page(head); - return 0; - } - - /* - * Any tail page need their mapcount reference taken before we - * return. - */ - while (refs--) { - if (PageTail(tail)) - get_huge_page_tail(tail); - tail++; - } - - return 1; -} - static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, unsigned long sz) { @@ -1038,3 +922,168 @@ void flush_dcache_icache_hugepage(struct page *page) } } } + +#endif /* CONFIG_HUGETLB_PAGE */ + +/* + * We have 4 cases for pgds and pmds: + * (1) invalid (all zeroes) + * (2) pointer to next table, as normal; bottom 6 bits == 0 + * (3) leaf pte for huge page, bottom two bits != 00 + * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table + * + * So long as we atomically load page table pointers we are safe against teardown, + * we can follow the address down to the the page and take a ref on it. 
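The rewritten find_linux_pte_or_hugepte() below loads each page-table entry exactly once into a local variable (ACCESS_ONCE()) and makes every later decision on that snapshot, so a concurrent THP split, collapse, or unmap cannot change the value between the tests. A minimal sketch of the idiom, with a C11 relaxed atomic load standing in for ACCESS_ONCE() and invented flag bits:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define E_PRESENT       (1UL << 0)      /* invented flag bits for the sketch */
#define E_LEAF          (1UL << 1)

static int classify(_Atomic uint64_t *slot)
{
        /* one load; every test below uses this local copy.  Re-reading *slot
         * in each branch would let the entry change between the tests. */
        uint64_t entry = atomic_load_explicit(slot, memory_order_relaxed);

        if (!(entry & E_PRESENT))
                return -1;              /* none: stop the walk */
        if (entry & E_LEAF)
                return 1;               /* leaf (huge) entry at this level */
        return 0;                       /* points to the next table level */
}

int main(void)
{
        _Atomic uint64_t slot = E_PRESENT | E_LEAF;

        printf("classify: %d\n", classify(&slot));
        return 0;
}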
+ */ + +pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift) +{ + pgd_t pgd, *pgdp; + pud_t pud, *pudp; + pmd_t pmd, *pmdp; + pte_t *ret_pte; + hugepd_t *hpdp = NULL; + unsigned pdshift = PGDIR_SHIFT; + + if (shift) + *shift = 0; + + pgdp = pgdir + pgd_index(ea); + pgd = ACCESS_ONCE(*pgdp); + /* + * Always operate on the local stack value. This make sure the + * value don't get updated by a parallel THP split/collapse, + * page fault or a page unmap. The return pte_t * is still not + * stable. So should be checked there for above conditions. + */ + if (pgd_none(pgd)) + return NULL; + else if (pgd_huge(pgd)) { + ret_pte = (pte_t *) pgdp; + goto out; + } else if (is_hugepd(&pgd)) + hpdp = (hugepd_t *)&pgd; + else { + /* + * Even if we end up with an unmap, the pgtable will not + * be freed, because we do an rcu free and here we are + * irq disabled + */ + pdshift = PUD_SHIFT; + pudp = pud_offset(&pgd, ea); + pud = ACCESS_ONCE(*pudp); + + if (pud_none(pud)) + return NULL; + else if (pud_huge(pud)) { + ret_pte = (pte_t *) pudp; + goto out; + } else if (is_hugepd(&pud)) + hpdp = (hugepd_t *)&pud; + else { + pdshift = PMD_SHIFT; + pmdp = pmd_offset(&pud, ea); + pmd = ACCESS_ONCE(*pmdp); + /* + * A hugepage collapse is captured by pmd_none, because + * it mark the pmd none and do a hpte invalidate. + * + * A hugepage split is captured by pmd_trans_splitting + * because we mark the pmd trans splitting and do a + * hpte invalidate + * + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) + return NULL; + + if (pmd_huge(pmd) || pmd_large(pmd)) { + ret_pte = (pte_t *) pmdp; + goto out; + } else if (is_hugepd(&pmd)) + hpdp = (hugepd_t *)&pmd; + else + return pte_offset_kernel(&pmd, ea); + } + } + if (!hpdp) + return NULL; + + ret_pte = hugepte_offset(hpdp, ea, pdshift); + pdshift = hugepd_shift(*hpdp); +out: + if (shift) + *shift = pdshift; + return ret_pte; +} +EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte); + +int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long mask; + unsigned long pte_end; + struct page *head, *page, *tail; + pte_t pte; + int refs; + + pte_end = (addr + sz) & ~(sz-1); + if (pte_end < end) + end = pte_end; + + pte = ACCESS_ONCE(*ptep); + mask = _PAGE_PRESENT | _PAGE_USER; + if (write) + mask |= _PAGE_RW; + + if ((pte_val(pte) & mask) != mask) + return 0; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* + * check for splitting here + */ + if (pmd_trans_splitting(pte_pmd(pte))) + return 0; +#endif + + /* hugepages are never "special" */ + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + + refs = 0; + head = pte_page(pte); + + page = head + ((addr & (sz-1)) >> PAGE_SHIFT); + tail = page; + do { + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; + } + + if (unlikely(pte_val(pte) != pte_val(*ptep))) { + /* Could be optimized better */ + *nr -= refs; + while (refs--) + put_page(head); + return 0; + } + + /* + * Any tail page need their mapcount reference taken before we + * return. 
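gup_hugepte() above follows the usual lockless get_user_pages recipe: snapshot the PTE, take page references speculatively, then re-read the PTE and undo everything if it changed underneath. A stripped-down sketch of that optimistic grab-and-revalidate shape; the atomic counters only stand in for real page references and none of this is kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define P_PRESENT (1UL << 0)            /* invented "present" flag */

static _Atomic uint64_t pte;            /* the entry being walked locklessly */
static _Atomic int page_refcount;       /* stands in for the compound head's refcount */

static bool gup_fast_sketch(int refs_wanted)
{
        uint64_t snap = atomic_load(&pte);              /* 1. snapshot */

        if (!(snap & P_PRESENT))
                return false;

        /* 2. speculative grab: bump the refcount before we are certain */
        atomic_fetch_add(&page_refcount, refs_wanted);

        /* 3. revalidate: if the entry changed, roll the references back */
        if (atomic_load(&pte) != snap) {
                atomic_fetch_sub(&page_refcount, refs_wanted);
                return false;
        }
        return true;                    /* references are now safely held */
}

int main(void)
{
        atomic_store(&pte, P_PRESENT);

        return gup_fast_sketch(4) ? 0 : 1;
}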
+ */ + while (refs--) { + if (PageTail(tail)) + get_huge_page_tail(tail); + tail++; + } + + return 1; +} diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index a90b9c458990..d0cd9e4c6837 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -88,7 +88,11 @@ static void pgd_ctor(void *addr) static void pmd_ctor(void *addr) { +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + memset(addr, 0, PMD_TABLE_SIZE * 2); +#else memset(addr, 0, PMD_TABLE_SIZE); +#endif } struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; @@ -137,10 +141,9 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) void pgtable_cache_init(void) { pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); - pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor); - if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE)) + pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); + if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX)) panic("Couldn't allocate pgtable caches"); - /* In all current configs, when the PUD index exists it's the * same size as either the pgd or pmd index. Verify that the * initialization above has also created a PUD cache. This diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0988a26e0413..7f4bea162026 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -299,47 +299,13 @@ void __init paging_init(void) void __init mem_init(void) { -#ifdef CONFIG_NEED_MULTIPLE_NODES - int nid; -#endif - pg_data_t *pgdat; - unsigned long i; - struct page *page; - unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; - #ifdef CONFIG_SWIOTLB swiotlb_init(0); #endif - num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - -#ifdef CONFIG_NEED_MULTIPLE_NODES - for_each_online_node(nid) { - if (NODE_DATA(nid)->node_spanned_pages != 0) { - printk("freeing bootmem node %d\n", nid); - totalram_pages += - free_all_bootmem_node(NODE_DATA(nid)); - } - } -#else - max_mapnr = max_pfn; - totalram_pages += free_all_bootmem(); -#endif - for_each_online_pgdat(pgdat) { - for (i = 0; i < pgdat->node_spanned_pages; i++) { - if (!pfn_valid(pgdat->node_start_pfn + i)) - continue; - page = pgdat_page_nr(pgdat, i); - if (PageReserved(page)) - reservedpages++; - } - } - - codesize = (unsigned long)&_sdata - (unsigned long)&_stext; - datasize = (unsigned long)&_edata - (unsigned long)&_sdata; - initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; - bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; + set_max_mapnr(max_pfn); + free_all_bootmem(); #ifdef CONFIG_HIGHMEM { @@ -349,13 +315,9 @@ void __init mem_init(void) for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; struct page *page = pfn_to_page(pfn); - if (memblock_is_reserved(paddr)) - continue; - free_highmem_page(page); - reservedpages--; + if (!memblock_is_reserved(paddr)) + free_highmem_page(page); } - printk(KERN_DEBUG "High memory: %luk\n", - totalhigh_pages << (PAGE_SHIFT-10)); } #endif /* CONFIG_HIGHMEM */ @@ -368,16 +330,7 @@ void __init mem_init(void) (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif - printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " - "%luk reserved, %luk data, %luk bss, %luk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - bsssize >> 10, - initsize >> 10); - + mem_init_print_info(NULL); #ifdef CONFIG_PPC32 pr_info("Kernel virtual 
memory layout:\n"); pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); @@ -407,7 +360,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif @@ -508,6 +461,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { #ifdef CONFIG_PPC_STD_MMU + /* + * We don't need to worry about _PAGE_PRESENT here because we are + * called with either mm->page_table_lock held or ptl lock held + */ unsigned long access = 0, trap; /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap.c index 67a42ed0d2fc..cb8bdbe4972f 100644 --- a/arch/powerpc/mm/mmap_64.c +++ b/arch/powerpc/mm/mmap.c @@ -92,10 +92,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index e779642c25e5..af3d78e19302 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -112,8 +112,10 @@ static unsigned int steal_context_smp(unsigned int id) */ for_each_cpu(cpu, mm_cpumask(mm)) { for (i = cpu_first_thread_sibling(cpu); - i <= cpu_last_thread_sibling(cpu); i++) - __set_bit(id, stale_map[i]); + i <= cpu_last_thread_sibling(cpu); i++) { + if (stale_map[i]) + __set_bit(id, stale_map[i]); + } cpu = i - 1; } return id; @@ -272,7 +274,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) /* XXX This clear should ultimately be part of local_flush_tlb_mm */ for (i = cpu_first_thread_sibling(cpu); i <= cpu_last_thread_sibling(cpu); i++) { - __clear_bit(id, stale_map[i]); + if (stale_map[i]) + __clear_bit(id, stale_map[i]); } } @@ -329,8 +332,8 @@ void destroy_context(struct mm_struct *mm) #ifdef CONFIG_SMP -static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int mmu_context_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; @@ -363,7 +366,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata mmu_context_cpu_nb = { +static struct notifier_block mmu_context_cpu_nb = { .notifier_call = mmu_context_cpu_notify, }; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 88c0425dc0a8..08397217e8ac 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -516,7 +516,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem, * Figure out to which domain a cpu belongs and stick it there. * Return the id of the domain used. 
*/ -static int __cpuinit numa_setup_cpu(unsigned long lcpu) +static int numa_setup_cpu(unsigned long lcpu) { int nid = 0; struct device_node *cpu = of_get_cpu_node(lcpu, NULL); @@ -538,8 +538,7 @@ out: return nid; } -static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, - unsigned long action, +static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned long lcpu = (unsigned long)hcpu; @@ -919,7 +918,7 @@ static void __init *careful_zallocation(int nid, unsigned long size, return ret; } -static struct notifier_block __cpuinitdata ppc64_numa_nb = { +static struct notifier_block ppc64_numa_nb = { .notifier_call = cpu_numa_callback, .priority = 1 /* Must run before sched domains notifier. */ }; @@ -1433,11 +1432,9 @@ static int update_cpu_topology(void *data) if (cpu != update->cpu) continue; - unregister_cpu_under_node(update->cpu, update->old_nid); unmap_cpu_from_node(update->cpu); map_cpu_to_node(update->cpu, update->new_nid); vdso_getcpu_init(); - register_cpu_under_node(update->cpu, update->new_nid); } return 0; @@ -1485,6 +1482,9 @@ int arch_update_cpu_topology(void) stop_machine(update_cpu_topology, &updates[0], &updated_cpus); for (ud = &updates[0]; ud; ud = ud->next) { + unregister_cpu_under_node(ud->cpu, ud->old_nid); + register_cpu_under_node(ud->cpu, ud->new_nid); + dev = get_cpu_device(ud->cpu); if (dev) kobject_uevent(&dev->kobj, KOBJ_CHANGE); diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 214130a4edc6..edda589795c3 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -235,6 +235,14 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr) pud = pud_offset(pgd, addr); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, addr); + /* + * khugepaged to collapse normal pages to hugepage, first set + * pmd to none to force page fault/gup to take mmap_sem. After + * pmd is set to none, we do a pte_clear which does this assertion + * so if we find pmd none, return. + */ + if (pmd_none(*pmd)) + return; BUG_ON(!pmd_present(*pmd)); assert_spin_locked(pte_lockptr(mm, pmd)); } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index a854096e1023..536eec72c0f7 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -338,6 +338,19 @@ EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(__iounmap); EXPORT_SYMBOL(__iounmap_at); +/* + * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags + * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address. + */ +struct page *pmd_page(pmd_t pmd) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (pmd_trans_huge(pmd)) + return pfn_to_page(pmd_pfn(pmd)); +#endif + return virt_to_page(pmd_page_vaddr(pmd)); +} + #ifdef CONFIG_PPC_64K_PAGES static pte_t *get_from_cache(struct mm_struct *mm) { @@ -455,3 +468,404 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) } #endif #endif /* CONFIG_PPC_64K_PAGES */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* + * This is called when relaxing access to a hugepage. It's also called in the page + * fault path when we don't hit any of the major fault cases, ie, a minor + * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... 
The generic code will have + * handled those two for us, we additionally deal with missing execute + * permission here on some processors + */ +int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp, pmd_t entry, int dirty) +{ + int changed; +#ifdef CONFIG_DEBUG_VM + WARN_ON(!pmd_trans_huge(*pmdp)); + assert_spin_locked(&vma->vm_mm->page_table_lock); +#endif + changed = !pmd_same(*(pmdp), entry); + if (changed) { + __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); + /* + * Since we are not supporting SW TLB systems, we don't + * have any thing similar to flush_tlb_page_nohash() + */ + } + return changed; +} + +unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, unsigned long clr) +{ + + unsigned long old, tmp; + +#ifdef CONFIG_DEBUG_VM + WARN_ON(!pmd_trans_huge(*pmdp)); + assert_spin_locked(&mm->page_table_lock); +#endif + +#ifdef PTE_ATOMIC_UPDATES + __asm__ __volatile__( + "1: ldarx %0,0,%3\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + andc %1,%0,%4 \n\ + stdcx. %1,0,%3 \n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) + : "cc" ); +#else + old = pmd_val(*pmdp); + *pmdp = __pmd(old & ~clr); +#endif + if (old & _PAGE_HASHPTE) + hpte_do_hugepage_flush(mm, addr, pmdp); + return old; +} + +pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + if (pmd_trans_huge(*pmdp)) { + pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); + } else { + /* + * khugepaged calls this for normal pmd + */ + pmd = *pmdp; + pmd_clear(pmdp); + /* + * Wait for all pending hash_page to finish. This is needed + * in case of subpage collapse. When we collapse normal pages + * to hugepage, we first clear the pmd, then invalidate all + * the PTE entries. The assumption here is that any low level + * page fault will see a none pmd and take the slow path that + * will wait on mmap_sem. But we could very well be in a + * hash_page with local ptep pointer value. Such a hash page + * can result in adding new HPTE entries for normal subpages. + * That means we could be modifying the page content as we + * copy them to a huge page. So wait for parallel hash_page + * to finish before invalidating HPTE entries. We can do this + * by sending an IPI to all the cpus and executing a dummy + * function there. + */ + kick_all_cpus_sync(); + /* + * Now invalidate the hpte entries in the range + * covered by pmd. This make sure we take a + * fault and will find the pmd as none, which will + * result in a major fault which takes mmap_sem and + * hence wait for collapse to complete. Without this + * the __collapse_huge_page_copy can result in copying + * the old content. + */ + flush_tlb_pmd_range(vma->vm_mm, &pmd, address); + } + return pmd; +} + +int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); +} + +/* + * We currently remove entries from the hashtable regardless of whether + * the entry was young or dirty. The generic routines only flush if the + * entry was young or dirty which is not good enough. 
+ * + * We should be more intelligent about this but for the moment we override + * these functions and force a tlb flush unconditionally + */ +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); +} + +/* + * We mark the pmd splitting and invalidate all the hpte + * entries for this hugepage. + */ +void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + unsigned long old, tmp; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + +#ifdef CONFIG_DEBUG_VM + WARN_ON(!pmd_trans_huge(*pmdp)); + assert_spin_locked(&vma->vm_mm->page_table_lock); +#endif + +#ifdef PTE_ATOMIC_UPDATES + + __asm__ __volatile__( + "1: ldarx %0,0,%3\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + ori %1,%0,%4 \n\ + stdcx. %1,0,%3 \n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) + : "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY) + : "cc" ); +#else + old = pmd_val(*pmdp); + *pmdp = __pmd(old | _PAGE_SPLITTING); +#endif + /* + * If we didn't had the splitting flag set, go and flush the + * HPTE entries. + */ + if (!(old & _PAGE_SPLITTING)) { + /* We need to flush the hpte */ + if (old & _PAGE_HASHPTE) + hpte_do_hugepage_flush(vma->vm_mm, address, pmdp); + } +} + +/* + * We want to put the pgtable in pmd and use pgtable for tracking + * the base page size hptes + */ +void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable) +{ + pgtable_t *pgtable_slot; + assert_spin_locked(&mm->page_table_lock); + /* + * we store the pgtable in the second half of PMD + */ + pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; + *pgtable_slot = pgtable; + /* + * expose the deposited pgtable to other cpus. + * before we set the hugepage PTE at pmd level + * hash fault code looks at the deposted pgtable + * to store hash index values. + */ + smp_wmb(); +} + +pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) +{ + pgtable_t pgtable; + pgtable_t *pgtable_slot; + + assert_spin_locked(&mm->page_table_lock); + pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; + pgtable = *pgtable_slot; + /* + * Once we withdraw, mark the entry NULL. + */ + *pgtable_slot = NULL; + /* + * We store HPTE information in the deposited PTE fragment. + * zero out the content on withdraw. + */ + memset(pgtable, 0, PTE_FRAG_SIZE); + return pgtable; +} + +/* + * set a new huge pmd. We should not be called for updating + * an existing pmd entry. That should go via pmd_hugepage_update. + */ +void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ +#ifdef CONFIG_DEBUG_VM + WARN_ON(!pmd_none(*pmdp)); + assert_spin_locked(&mm->page_table_lock); + WARN_ON(!pmd_trans_huge(pmd)); +#endif + return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); +} + +void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); +} + +/* + * A linux hugepage PMD was changed and the corresponding hash table entries + * neesd to be flushed. 
+ */ +void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + int ssize, i; + unsigned long s_addr; + int max_hpte_count; + unsigned int psize, valid; + unsigned char *hpte_slot_array; + unsigned long hidx, vpn, vsid, hash, shift, slot; + + /* + * Flush all the hptes mapping this hugepage + */ + s_addr = addr & HPAGE_PMD_MASK; + hpte_slot_array = get_hpte_slot_array(pmdp); + /* + * IF we try to do a HUGE PTE update after a withdraw is done. + * we will find the below NULL. This happens when we do + * split_huge_page_pmd + */ + if (!hpte_slot_array) + return; + + /* get the base page size */ + psize = get_slice_psize(mm, s_addr); + + if (ppc_md.hugepage_invalidate) + return ppc_md.hugepage_invalidate(mm, hpte_slot_array, + s_addr, psize); + /* + * No bluk hpte removal support, invalidate each entry + */ + shift = mmu_psize_defs[psize].shift; + max_hpte_count = HPAGE_PMD_SIZE >> shift; + for (i = 0; i < max_hpte_count; i++) { + /* + * 8 bits per each hpte entries + * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit] + */ + valid = hpte_valid(hpte_slot_array, i); + if (!valid) + continue; + hidx = hpte_hash_index(hpte_slot_array, i); + + /* get the vpn */ + addr = s_addr + (i * (1ul << shift)); + if (!is_kernel_addr(addr)) { + ssize = user_segment_size(addr); + vsid = get_vsid(mm->context.id, addr, ssize); + WARN_ON(vsid == 0); + } else { + vsid = get_kernel_vsid(addr, mmu_kernel_ssize); + ssize = mmu_kernel_ssize; + } + + vpn = hpt_vpn(addr, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + ppc_md.hpte_invalidate(slot, vpn, psize, + MMU_PAGE_16M, ssize, 0); + } +} + +static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) +{ + pmd_val(pmd) |= pgprot_val(pgprot); + return pmd; +} + +pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) +{ + pmd_t pmd; + /* + * For a valid pte, we would have _PAGE_PRESENT or _PAGE_FILE always + * set. We use this to check THP page at pmd level. + * leaf pte for huge page, bottom two bits != 00 + */ + pmd_val(pmd) = pfn << PTE_RPN_SHIFT; + pmd_val(pmd) |= _PAGE_THP_HUGE; + pmd = pmd_set_protbits(pmd, pgprot); + return pmd; +} + +pmd_t mk_pmd(struct page *page, pgprot_t pgprot) +{ + return pfn_pmd(page_to_pfn(page), pgprot); +} + +pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + + pmd_val(pmd) &= _HPAGE_CHG_MASK; + pmd = pmd_set_protbits(pmd, newprot); + return pmd; +} + +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a HUGE PMD entry in the linux page tables. + * We use it to preload an HPTE into the hash table corresponding to + * the updated linux HUGE PMD entry. + */ +void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd) +{ + return; +} + +pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + pmd_t old_pmd; + pgtable_t pgtable; + unsigned long old; + pgtable_t *pgtable_slot; + + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); + old_pmd = __pmd(old); + /* + * We have pmd == none and we are holding page_table_lock. + * So we can safely go and clear the pgtable hash + * index info. + */ + pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; + pgtable = *pgtable_slot; + /* + * Let's zero out old valid and hash index details + * hash fault look at them. 
+ */ + memset(pgtable, 0, PTE_FRAG_SIZE); + return old_pmd; +} + +int has_transparent_hugepage(void) +{ + if (!mmu_has_feature(MMU_FTR_16M_PAGE)) + return 0; + /* + * We support THP only if PMD_SIZE is 16MB. + */ + if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT) + return 0; + /* + * We need to make sure that we support 16MB hugepage in a segement + * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE + * of 64K. + */ + /* + * If we have 64K HPTE, we will be using that by default + */ + if (mmu_psize_defs[MMU_PAGE_64K].shift && + (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1)) + return 0; + /* + * Ok we only have 4K HPTE + */ + if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1) + return 0; + + return 1; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index 7c415ddde948..aa74acb0fdfc 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -130,6 +130,53 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) up_write(&mm->mmap_sem); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct vm_area_struct *vma = walk->private; + split_huge_page_pmd(vma, addr, pmd); + return 0; +} + +static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, + unsigned long len) +{ + struct vm_area_struct *vma; + struct mm_walk subpage_proto_walk = { + .mm = mm, + .pmd_entry = subpage_walk_pmd_entry, + }; + + /* + * We don't try too hard, we just mark all the vma in that range + * VM_NOHUGEPAGE and split them. + */ + vma = find_vma(mm, addr); + /* + * If the range is in unmapped range, just return + */ + if (vma && ((addr + len) <= vma->vm_start)) + return; + + while (vma) { + if (vma->vm_start >= (addr + len)) + break; + vma->vm_flags |= VM_NOHUGEPAGE; + subpage_proto_walk.private = vma; + walk_page_range(vma->vm_start, vma->vm_end, + &subpage_proto_walk); + vma = vma->vm_next; + } +} +#else +static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, + unsigned long len) +{ + return; +} +#endif + /* * Copy in a subpage protection map for an address range. * The map has 2 bits per 4k subpage, so 32 bits per 64k page. 
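
As an aside on the comment above — "2 bits per 4k subpage, so 32 bits per 64k page" — the following stand-alone sketch (not part of the patch) shows how one such 32-bit map word packs 16 two-bit fields, one per 4k subpage of a 64k page. The helper names and the bit ordering (subpage 0 in the low-order bits) are assumptions made for illustration; the authoritative layout and the meaning of the 2-bit protection values are defined by the kernel's subpage-protection interface, not by this sketch.

/* Illustration only, not part of the diff: one u32 covers one 64k page,
 * i.e. 16 subpages of 4k with 2 protection bits each.  Bit ordering and
 * helper names are assumptions for this sketch. */
#include <stdint.h>
#include <stdio.h>

#define SUBPAGES_PER_64K 16	/* 64k / 4k */

/* Store a 2-bit protection value for subpage 'idx' in 'word'. */
static uint32_t subpage_map_set(uint32_t word, unsigned int idx, unsigned int prot)
{
	unsigned int shift = 2 * idx;	/* assumed: subpage 0 in bits 1:0 */

	word &= ~(3u << shift);
	word |= (prot & 3u) << shift;
	return word;
}

/* Read back the 2-bit field for subpage 'idx'. */
static unsigned int subpage_map_get(uint32_t word, unsigned int idx)
{
	return (word >> (2 * idx)) & 3u;
}

int main(void)
{
	uint32_t map = 0;
	unsigned int i;

	map = subpage_map_set(map, 3, 2);	/* placeholder protection value */
	for (i = 0; i < SUBPAGES_PER_64K; i++)
		printf("subpage %2u -> %u\n", i, subpage_map_get(map, i));
	return 0;
}

A full map passed to the subpage-protection syscall would contain one such word per 64k page in the affected range, which is what the "32 bits per 64k page" comment describes.
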
@@ -168,6 +215,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) return -EFAULT; down_write(&mm->mmap_sem); + subpage_mark_vma_nohuge(mm, addr, len); for (limit = addr + len; addr < limit; addr = next) { next = pmd_addr_end(addr, limit); err = -ENOMEM; diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 7df1c5edda87..36e44b4260eb 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -189,6 +189,7 @@ void tlb_flush(struct mmu_gather *tlb) void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end) { + int hugepage_shift; unsigned long flags; start = _ALIGN_DOWN(start, PAGE_SIZE); @@ -206,7 +207,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, local_irq_save(flags); arch_enter_lazy_mmu_mode(); for (; start < end; start += PAGE_SIZE) { - pte_t *ptep = find_linux_pte(mm->pgd, start); + pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, + &hugepage_shift); unsigned long pte; if (ptep == NULL) @@ -214,7 +216,37 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, pte = pte_val(*ptep); if (!(pte & _PAGE_HASHPTE)) continue; - hpte_need_flush(mm, start, ptep, pte, 0); + if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte))) + hpte_do_hugepage_flush(mm, start, (pmd_t *)pte); + else + hpte_need_flush(mm, start, ptep, pte, 0); + } + arch_leave_lazy_mmu_mode(); + local_irq_restore(flags); +} + +void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) +{ + pte_t *pte; + pte_t *start_pte; + unsigned long flags; + + addr = _ALIGN_DOWN(addr, PMD_SIZE); + /* Note: Normally, we should only ever use a batch within a + * PTE locked section. This violates the rule, but will work + * since we don't actually modify the PTEs, we just flush the + * hash while leaving the PTEs intact (including their reference + * to being hashed). This is not the most performance oriented + * way to do things but is fine for our needs here. 
+ */ + local_irq_save(flags); + arch_enter_lazy_mmu_mode(); + start_pte = pte_offset_map(pmd, addr); + for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) { + unsigned long pteval = pte_val(*pte); + if (pteval & _PAGE_HASHPTE) + hpte_need_flush(mm, addr, pte, pteval, 0); + addr += PAGE_SIZE; } arch_leave_lazy_mmu_mode(); local_irq_restore(flags); diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 6888cad5103d..41cd68dee681 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -648,7 +648,7 @@ void __init early_init_mmu(void) __early_init_mmu(1); } -void __cpuinit early_init_mmu_secondary(void) +void early_init_mmu_secondary(void) { __early_init_mmu(0); } diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index c427ae36374a..bf56e33f8257 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -650,8 +650,7 @@ void bpf_jit_compile(struct sk_filter *fp) proglen = cgctx.idx * 4; alloclen = proglen + FUNCTION_DESCR_SIZE; - image = module_alloc(max_t(unsigned int, alloclen, - sizeof(struct work_struct))); + image = module_alloc(alloclen); if (!image) goto out; @@ -688,20 +687,8 @@ out: return; } -static void jit_free_defer(struct work_struct *arg) -{ - module_free(NULL, arg); -} - -/* run from softirq, we must use a work_struct to call - * module_free() from process context - */ void bpf_jit_free(struct sk_filter *fp) { - if (fp->bpf_func != sk_run_filter) { - struct work_struct *work = (struct work_struct *)fp->bpf_func; - - INIT_WORK(work, jit_free_defer); - schedule_work(work); - } + if (fp->bpf_func != sk_run_filter) + module_free(NULL, fp->bpf_func); } diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 29c6482890c8..a3985aee77fe 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -75,6 +75,11 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; #define MMCR0_FCHV 0 #define MMCR0_PMCjCE MMCR0_PMCnCE +#define MMCR0_FC56 0 +#define MMCR0_PMAO 0 +#define MMCR0_EBE 0 +#define MMCR0_PMCC 0 +#define MMCR0_PMCC_U6 0 #define SPRN_MMCRA SPRN_MMCR2 #define MMCRA_SAMPLE_ENABLE 0 @@ -102,6 +107,15 @@ static inline int siar_valid(struct pt_regs *regs) return 1; } +static bool is_ebb_event(struct perf_event *event) { return false; } +static int ebb_event_check(struct perf_event *event) { return 0; } +static void ebb_event_add(struct perf_event *event) { } +static void ebb_switch_out(unsigned long mmcr0) { } +static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0) +{ + return mmcr0; +} + static inline void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {} void power_pmu_flush_branch_stack(void) {} @@ -462,6 +476,89 @@ void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) return; } +static bool is_ebb_event(struct perf_event *event) +{ + /* + * This could be a per-PMU callback, but we'd rather avoid the cost. We + * check that the PMU supports EBB, meaning those that don't can still + * use bit 63 of the event code for something else if they wish. 
+ */ + return (ppmu->flags & PPMU_EBB) && + ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1); +} + +static int ebb_event_check(struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + + /* Event and group leader must agree on EBB */ + if (is_ebb_event(leader) != is_ebb_event(event)) + return -EINVAL; + + if (is_ebb_event(event)) { + if (!(event->attach_state & PERF_ATTACH_TASK)) + return -EINVAL; + + if (!leader->attr.pinned || !leader->attr.exclusive) + return -EINVAL; + + if (event->attr.inherit || event->attr.sample_period || + event->attr.enable_on_exec || event->attr.freq) + return -EINVAL; + } + + return 0; +} + +static void ebb_event_add(struct perf_event *event) +{ + if (!is_ebb_event(event) || current->thread.used_ebb) + return; + + /* + * IFF this is the first time we've added an EBB event, set + * PMXE in the user MMCR0 so we can detect when it's cleared by + * userspace. We need this so that we can context switch while + * userspace is in the EBB handler (where PMXE is 0). + */ + current->thread.used_ebb = 1; + current->thread.mmcr0 |= MMCR0_PMXE; +} + +static void ebb_switch_out(unsigned long mmcr0) +{ + if (!(mmcr0 & MMCR0_EBE)) + return; + + current->thread.siar = mfspr(SPRN_SIAR); + current->thread.sier = mfspr(SPRN_SIER); + current->thread.sdar = mfspr(SPRN_SDAR); + current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK; + current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK; +} + +static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0) +{ + if (!ebb) + goto out; + + /* Enable EBB and read/write to all 6 PMCs for userspace */ + mmcr0 |= MMCR0_EBE | MMCR0_PMCC_U6; + + /* Add any bits from the user reg, FC or PMAO */ + mmcr0 |= current->thread.mmcr0; + + /* Be careful not to set PMXE if userspace had it cleared */ + if (!(current->thread.mmcr0 & MMCR0_PMXE)) + mmcr0 &= ~MMCR0_PMXE; + + mtspr(SPRN_SIAR, current->thread.siar); + mtspr(SPRN_SIER, current->thread.sier); + mtspr(SPRN_SDAR, current->thread.sdar); + mtspr(SPRN_MMCR2, current->thread.mmcr2); +out: + return mmcr0; +} #endif /* CONFIG_PPC64 */ static void perf_event_interrupt(struct pt_regs *regs); @@ -732,6 +829,13 @@ static void power_pmu_read(struct perf_event *event) if (!event->hw.idx) return; + + if (is_ebb_event(event)) { + val = read_pmc(event->hw.idx); + local64_set(&event->hw.prev_count, val); + return; + } + /* * Performance monitor interrupts come even when interrupts * are soft-disabled, as long as interrupts are hard-enabled. @@ -852,7 +956,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; - unsigned long flags; + unsigned long flags, mmcr0, val; if (!ppmu) return; @@ -860,9 +964,6 @@ static void power_pmu_disable(struct pmu *pmu) cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { - cpuhw->disabled = 1; - cpuhw->n_added = 0; - /* * Check if we ever enabled the PMU on this cpu. */ @@ -872,6 +973,21 @@ static void power_pmu_disable(struct pmu *pmu) } /* + * Set the 'freeze counters' bit, clear EBE/PMCC/PMAO/FC56. + */ + val = mmcr0 = mfspr(SPRN_MMCR0); + val |= MMCR0_FC; + val &= ~(MMCR0_EBE | MMCR0_PMCC | MMCR0_PMAO | MMCR0_FC56); + + /* + * The barrier is to make sure the mtspr has been + * executed and the PMU has frozen the events etc. + * before we return. 
+ */ + write_mmcr0(cpuhw, val); + mb(); + + /* * Disable instruction sampling if it was enabled */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { @@ -880,15 +996,12 @@ static void power_pmu_disable(struct pmu *pmu) mb(); } - /* - * Set the 'freeze counters' bit. - * The barrier is to make sure the mtspr has been - * executed and the PMU has frozen the events - * before we return. - */ - write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); - mb(); + cpuhw->disabled = 1; + cpuhw->n_added = 0; + + ebb_switch_out(mmcr0); } + local_irq_restore(flags); } @@ -903,23 +1016,36 @@ static void power_pmu_enable(struct pmu *pmu) struct cpu_hw_events *cpuhw; unsigned long flags; long i; - unsigned long val; + unsigned long val, mmcr0; s64 left; unsigned int hwc_index[MAX_HWEVENTS]; int n_lim; int idx; + bool ebb; if (!ppmu) return; local_irq_save(flags); + cpuhw = &__get_cpu_var(cpu_hw_events); - if (!cpuhw->disabled) { - local_irq_restore(flags); - return; + if (!cpuhw->disabled) + goto out; + + if (cpuhw->n_events == 0) { + ppc_set_pmu_inuse(0); + goto out; } + cpuhw->disabled = 0; /* + * EBB requires an exclusive group and all events must have the EBB + * flag set, or not set, so we can just check a single event. Also we + * know we have at least one event. + */ + ebb = is_ebb_event(cpuhw->event[0]); + + /* * If we didn't change anything, or only removed events, * no need to recalculate MMCR* settings and reset the PMCs. * Just reenable the PMU with the current MMCR* settings @@ -928,8 +1054,6 @@ static void power_pmu_enable(struct pmu *pmu) if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); - if (cpuhw->n_events == 0) - ppc_set_pmu_inuse(0); goto out_enable; } @@ -996,25 +1120,34 @@ static void power_pmu_enable(struct pmu *pmu) ++n_lim; continue; } - val = 0; - if (event->hw.sample_period) { - left = local64_read(&event->hw.period_left); - if (left < 0x80000000L) - val = 0x80000000L - left; + + if (ebb) + val = local64_read(&event->hw.prev_count); + else { + val = 0; + if (event->hw.sample_period) { + left = local64_read(&event->hw.period_left); + if (left < 0x80000000L) + val = 0x80000000L - left; + } + local64_set(&event->hw.prev_count, val); } - local64_set(&event->hw.prev_count, val); + event->hw.idx = idx; if (event->hw.state & PERF_HES_STOPPED) val = 0; write_pmc(idx, val); + perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: + mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); + mb(); - write_mmcr0(cpuhw, cpuhw->mmcr[0]); + write_mmcr0(cpuhw, mmcr0); /* * Enable instruction sampling if necessary @@ -1112,6 +1245,8 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) event->hw.config = cpuhw->events[n0]; nocheck: + ebb_event_add(event); + ++cpuhw->n_events; ++cpuhw->n_added; @@ -1472,6 +1607,11 @@ static int power_pmu_event_init(struct perf_event *event) } } + /* Extra checks for EBB */ + err = ebb_event_check(event); + if (err) + return err; + /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event @@ -1511,6 +1651,13 @@ static int power_pmu_event_init(struct perf_event *event) local64_set(&event->hw.period_left, event->hw.last_period); /* + * For EBB events we just context switch the PMC value, we don't do any + * of the sample_period logic. We use hw.prev_count for this. + */ + if (is_ebb_event(event)) + local64_set(&event->hw.prev_count, 0); + + /* * See if we need to reserve the PMU. 
* If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing @@ -1786,7 +1933,7 @@ static void power_pmu_setup(int cpu) cpuhw->mmcr[0] = MMCR0_FC; } -static int __cpuinit +static int power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; @@ -1803,7 +1950,7 @@ power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu return NOTIFY_OK; } -int __cpuinit register_power_pmu(struct power_pmu *pmu) +int register_power_pmu(struct power_pmu *pmu) { if (ppmu) return -EBUSY; /* something's already registered */ diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 13c3f0e547a2..d1821b8bbc4c 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -60,7 +60,7 @@ #define PME_PM_LD_REF_L1 0xc880 #define PME_PM_LD_MISS_L1 0x400f0 #define PME_PM_BRU_FIN 0x10068 -#define PME_PM_BRU_MPRED 0x400f6 +#define PME_PM_BR_MPRED 0x400f6 #define PME_PM_CMPLU_STALL_FXU 0x20014 #define PME_PM_CMPLU_STALL_DIV 0x40014 @@ -349,7 +349,7 @@ static int power7_generic_events[] = { [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1, [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN, - [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED, + [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BR_MPRED, }; #define C(x) PERF_COUNT_HW_CACHE_##x @@ -405,7 +405,7 @@ GENERIC_EVENT_ATTR(instructions, INST_CMPL); GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); -GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); +GENERIC_EVENT_ATTR(branch-misses, BR_MPRED); POWER_EVENT_ATTR(CYC, CYC); POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); @@ -414,7 +414,7 @@ POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); POWER_EVENT_ATTR(BRU_FIN, BRU_FIN) -POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED); +POWER_EVENT_ATTR(BR_MPRED, BR_MPRED); POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU); POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV); @@ -449,7 +449,7 @@ static struct attribute *power7_events_attr[] = { GENERIC_EVENT_PTR(LD_REF_L1), GENERIC_EVENT_PTR(LD_MISS_L1), GENERIC_EVENT_PTR(BRU_FIN), - GENERIC_EVENT_PTR(BRU_MPRED), + GENERIC_EVENT_PTR(BR_MPRED), POWER_EVENT_PTR(CYC), POWER_EVENT_PTR(GCT_NOSLOT_CYC), @@ -458,7 +458,7 @@ static struct attribute *power7_events_attr[] = { POWER_EVENT_PTR(LD_REF_L1), POWER_EVENT_PTR(LD_MISS_L1), POWER_EVENT_PTR(BRU_FIN), - POWER_EVENT_PTR(BRU_MPRED), + POWER_EVENT_PTR(BR_MPRED), POWER_EVENT_PTR(CMPLU_STALL_FXU), POWER_EVENT_PTR(CMPLU_STALL_DIV), diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index f7d1c4fff303..96a64d6a8bdf 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -31,9 +31,9 @@ * * 60 56 52 48 44 40 36 32 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - * [ thresh_cmp ] [ thresh_ctl ] - * | - * thresh start/stop OR FAB match -* + * | [ thresh_cmp ] [ thresh_ctl ] + * | | + * *- EBB (Linux) thresh start/stop OR FAB match -* * * 28 24 20 16 12 8 4 0 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | @@ -85,6 +85,7 @@ * */ +#define EVENT_EBB_MASK 1ull #define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */ #define EVENT_THR_CMP_MASK 0x3ff #define EVENT_THR_CTL_SHIFT 32 /* Threshold 
control value (start/stop) */ @@ -109,6 +110,17 @@ #define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) #define EVENT_PSEL_MASK 0xff /* PMCxSEL value */ +#define EVENT_VALID_MASK \ + ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \ + (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \ + (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \ + (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \ + (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ + (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \ + (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ + (EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT) | \ + EVENT_PSEL_MASK) + /* MMCRA IFM bits - POWER8 */ #define POWER8_MMCRA_IFM1 0x0000000040000000UL #define POWER8_MMCRA_IFM2 0x0000000080000000UL @@ -130,10 +142,10 @@ * * 28 24 20 16 12 8 4 0 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - * [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1] - * | | - * L1 I/D qualifier -* | Count of events for each PMC. - * | p1, p2, p3, p4, p5, p6. + * | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1] + * EBB -* | | + * | | Count of events for each PMC. + * L1 I/D qualifier -* | p1, p2, p3, p4, p5, p6. * nc - number of counters -* * * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints @@ -149,6 +161,9 @@ #define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32) #define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK) +#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24) +#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK) + #define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22) #define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3) @@ -207,14 +222,21 @@ static inline bool event_is_fab_match(u64 event) static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) { - unsigned int unit, pmc, cache; + unsigned int unit, pmc, cache, ebb; unsigned long mask, value; mask = value = 0; - pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; - unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; - cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; + if (event & ~EVENT_VALID_MASK) + return -1; + + pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; + unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; + cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; + ebb = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK; + + /* Clear the EBB bit in the event, so event checks work below */ + event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT); if (pmc) { if (pmc > 6) @@ -284,6 +306,18 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); } + if (!pmc && ebb) + /* EBB events must specify the PMC */ + return -1; + + /* + * All events must agree on EBB, either all request it or none. + * EBB events are pinned & exclusive, so this should never actually + * hit, but we leave it as a fallback in case. 
+ */ + mask |= CNST_EBB_VAL(ebb); + value |= CNST_EBB_MASK; + *maskp = mask; *valp = value; @@ -378,6 +412,10 @@ static int power8_compute_mmcr(u64 event[], int n_ev, if (pmc_inuse & 0x7c) mmcr[0] |= MMCR0_PMCjCE; + /* If we're not using PMC 5 or 6, freeze them */ + if (!(pmc_inuse & 0x60)) + mmcr[0] |= MMCR0_FC56; + mmcr[1] = mmcr1; mmcr[2] = mmcra; @@ -574,7 +612,7 @@ static struct power_pmu power8_pmu = { .get_constraint = power8_get_constraint, .get_alternatives = power8_get_alternatives, .disable_pmc = power8_disable_pmc, - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB, + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, .attr_groups = power8_pmu_attr_groups, diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c index ecd3890c40d7..7f1b71a01c6a 100644 --- a/arch/powerpc/platforms/44x/currituck.c +++ b/arch/powerpc/platforms/44x/currituck.c @@ -91,12 +91,12 @@ static void __init ppc47x_init_irq(void) } #ifdef CONFIG_SMP -static void __cpuinit smp_ppc47x_setup_cpu(int cpu) +static void smp_ppc47x_setup_cpu(int cpu) { mpic_setup_this_cpu(); } -static int __cpuinit smp_ppc47x_kick_cpu(int cpu) +static int smp_ppc47x_kick_cpu(int cpu) { struct device_node *cpunode = of_get_cpu_node(cpu, NULL); const u64 *spin_table_addr_prop; @@ -176,13 +176,48 @@ static int __init ppc47x_probe(void) return 1; } +static int board_rev = -1; +static int __init ppc47x_get_board_rev(void) +{ + u8 fpga_reg0; + void *fpga; + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "ibm,currituck-fpga"); + if (!np) + goto fail; + + fpga = of_iomap(np, 0); + of_node_put(np); + if (!fpga) + goto fail; + + fpga_reg0 = ioread8(fpga); + board_rev = fpga_reg0 & 0x03; + pr_info("%s: Found board revision %d\n", __func__, board_rev); + iounmap(fpga); + return 0; + +fail: + pr_info("%s: Unable to find board revision\n", __func__); + return 0; +} +machine_arch_initcall(ppc47x, ppc47x_get_board_rev); + /* Use USB controller should have been hardware swizzled but it wasn't :( */ static void ppc47x_pci_irq_fixup(struct pci_dev *dev) { if (dev->vendor == 0x1033 && (dev->device == 0x0035 || dev->device == 0x00e0)) { - dev->irq = irq_create_mapping(NULL, 47); - pr_info("%s: Mapping irq 47 %d\n", __func__, dev->irq); + if (board_rev == 0) { + dev->irq = irq_create_mapping(NULL, 47); + pr_info("%s: Mapping irq %d\n", __func__, dev->irq); + } else if (board_rev == 2) { + dev->irq = irq_create_mapping(NULL, 49); + pr_info("%s: Mapping irq %d\n", __func__, dev->irq); + } else { + pr_alert("%s: Unknown board revision\n", __func__); + } } } diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c index a28a8629727e..4241bc825800 100644 --- a/arch/powerpc/platforms/44x/iss4xx.c +++ b/arch/powerpc/platforms/44x/iss4xx.c @@ -81,12 +81,12 @@ static void __init iss4xx_init_irq(void) } #ifdef CONFIG_SMP -static void __cpuinit smp_iss4xx_setup_cpu(int cpu) +static void smp_iss4xx_setup_cpu(int cpu) { mpic_setup_this_cpu(); } -static int __cpuinit smp_iss4xx_kick_cpu(int cpu) +static int smp_iss4xx_kick_cpu(int cpu) { struct device_node *cpunode = of_get_cpu_node(cpu, NULL); const u64 *spin_table_addr_prop; diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c index 0a134e0469ef..3e90ece10ae9 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads.c @@ -43,9 +43,7 @@ 
static void __init mpc5121_ads_setup_arch(void) mpc83xx_add_bridge(np); #endif -#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) - mpc512x_setup_diu(); -#endif + mpc512x_setup_arch(); } static void __init mpc5121_ads_init_IRQ(void) @@ -69,7 +67,7 @@ define_machine(mpc5121_ads) { .probe = mpc5121_ads_probe, .setup_arch = mpc5121_ads_setup_arch, .init = mpc512x_init, - .init_early = mpc512x_init_diu, + .init_early = mpc512x_init_early, .init_IRQ = mpc5121_ads_init_IRQ, .get_irq = ipic_get_irq, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h index 0a8e60023944..cc97f022d028 100644 --- a/arch/powerpc/platforms/512x/mpc512x.h +++ b/arch/powerpc/platforms/512x/mpc512x.h @@ -12,18 +12,12 @@ #ifndef __MPC512X_H__ #define __MPC512X_H__ extern void __init mpc512x_init_IRQ(void); +extern void __init mpc512x_init_early(void); extern void __init mpc512x_init(void); +extern void __init mpc512x_setup_arch(void); extern int __init mpc5121_clk_init(void); -void __init mpc512x_declare_of_platform_devices(void); extern const char *mpc512x_select_psc_compat(void); +extern const char *mpc512x_select_reset_compat(void); extern void mpc512x_restart(char *cmd); -#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) -void mpc512x_init_diu(void); -void mpc512x_setup_diu(void); -#else -#define mpc512x_init_diu NULL -#define mpc512x_setup_diu NULL -#endif - #endif /* __MPC512X_H__ */ diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c index 5fb919b30924..ce71408781a0 100644 --- a/arch/powerpc/platforms/512x/mpc512x_generic.c +++ b/arch/powerpc/platforms/512x/mpc512x_generic.c @@ -45,8 +45,8 @@ define_machine(mpc512x_generic) { .name = "MPC512x generic", .probe = mpc512x_generic_probe, .init = mpc512x_init, - .init_early = mpc512x_init_diu, - .setup_arch = mpc512x_setup_diu, + .init_early = mpc512x_init_early, + .setup_arch = mpc512x_setup_arch, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index 6eb94ab99d39..a82a41b4fd91 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -35,8 +35,10 @@ static struct mpc512x_reset_module __iomem *reset_module_base; static void __init mpc512x_restart_init(void) { struct device_node *np; + const char *reset_compat; - np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); + reset_compat = mpc512x_select_reset_compat(); + np = of_find_compatible_node(NULL, NULL, reset_compat); if (!np) return; @@ -58,7 +60,7 @@ void mpc512x_restart(char *cmd) ; } -#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) +#if IS_ENABLED(CONFIG_FB_FSL_DIU) struct fsl_diu_shared_fb { u8 gamma[0x300]; /* 32-bit aligned! 
*/ @@ -355,6 +357,17 @@ const char *mpc512x_select_psc_compat(void) return NULL; } +const char *mpc512x_select_reset_compat(void) +{ + if (of_machine_is_compatible("fsl,mpc5121")) + return "fsl,mpc5121-reset"; + + if (of_machine_is_compatible("fsl,mpc5125")) + return "fsl,mpc5125-reset"; + + return NULL; +} + static unsigned int __init get_fifo_size(struct device_node *np, char *prop_name) { @@ -436,14 +449,26 @@ void __init mpc512x_psc_fifo_init(void) } } +void __init mpc512x_init_early(void) +{ + mpc512x_restart_init(); + if (IS_ENABLED(CONFIG_FB_FSL_DIU)) + mpc512x_init_diu(); +} + void __init mpc512x_init(void) { mpc5121_clk_init(); mpc512x_declare_of_platform_devices(); - mpc512x_restart_init(); mpc512x_psc_fifo_init(); } +void __init mpc512x_setup_arch(void) +{ + if (IS_ENABLED(CONFIG_FB_FSL_DIU)) + mpc512x_setup_diu(); +} + /** * mpc512x_cs_config - Setup chip select configuration * @cs: chip select number diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c index 0575e858291c..24b314d7bd5f 100644 --- a/arch/powerpc/platforms/512x/pdm360ng.c +++ b/arch/powerpc/platforms/512x/pdm360ng.c @@ -119,9 +119,9 @@ static int __init pdm360ng_probe(void) define_machine(pdm360ng) { .name = "PDM360NG", .probe = pdm360ng_probe, - .setup_arch = mpc512x_setup_diu, + .setup_arch = mpc512x_setup_arch, .init = pdm360ng_init, - .init_early = mpc512x_init_diu, + .init_early = mpc512x_init_early, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 624cb51d19c9..7bc315822935 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -231,17 +231,7 @@ static struct i2c_driver mcu_driver = { .id_table = mcu_ids, }; -static int __init mcu_init(void) -{ - return i2c_add_driver(&mcu_driver); -} -module_init(mcu_init); - -static void __exit mcu_exit(void) -{ - i2c_del_driver(&mcu_driver); -} -module_exit(mcu_exit); +module_i2c_driver(mcu_driver); MODULE_DESCRIPTION("Power Management and GPIO expander driver for " "MPC8349E-mITX-compatible MCU"); diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c index 753a42c29d4d..39cfa4044e6c 100644 --- a/arch/powerpc/platforms/85xx/p5020_ds.c +++ b/arch/powerpc/platforms/85xx/p5020_ds.c @@ -75,12 +75,7 @@ define_machine(p5020_ds) { #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif -/* coreint doesn't play nice with lazy EE, use legacy mpic for now */ -#ifdef CONFIG_PPC64 - .get_irq = mpic_get_irq, -#else .get_irq = mpic_get_coreint_irq, -#endif .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c index 11381851828e..f70e74cddf97 100644 --- a/arch/powerpc/platforms/85xx/p5040_ds.c +++ b/arch/powerpc/platforms/85xx/p5040_ds.c @@ -66,12 +66,7 @@ define_machine(p5040_ds) { #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif -/* coreint doesn't play nice with lazy EE, use legacy mpic for now */ -#ifdef CONFIG_PPC64 - .get_irq = mpic_get_irq, -#else .get_irq = mpic_get_coreint_irq, -#endif .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 6a1759939c6b..5ced4f5bb2b2 100644 --- 
a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -99,7 +99,7 @@ static void mpc85xx_take_timebase(void) } #ifdef CONFIG_HOTPLUG_CPU -static void __cpuinit smp_85xx_mach_cpu_die(void) +static void smp_85xx_mach_cpu_die(void) { unsigned int cpu = smp_processor_id(); u32 tmp; @@ -141,7 +141,7 @@ static inline u32 read_spin_table_addr_l(void *spin_table) return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l); } -static int __cpuinit smp_85xx_kick_cpu(int nr) +static int smp_85xx_kick_cpu(int nr) { unsigned long flags; const u64 *cpu_rel_addr; @@ -362,7 +362,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image) } #endif /* CONFIG_KEXEC */ -static void __cpuinit smp_85xx_setup_cpu(int cpu_nr) +static void smp_85xx_setup_cpu(int cpu_nr) { if (smp_85xx_ops.probe == smp_mpic_probe) mpic_setup_this_cpu(); diff --git a/arch/powerpc/platforms/85xx/t4240_qds.c b/arch/powerpc/platforms/85xx/t4240_qds.c index 5998e9f33304..91ead6b1b8af 100644 --- a/arch/powerpc/platforms/85xx/t4240_qds.c +++ b/arch/powerpc/platforms/85xx/t4240_qds.c @@ -75,12 +75,7 @@ define_machine(t4240_qds) { #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif -/* coreint doesn't play nice with lazy EE, use legacy mpic for now */ -#ifdef CONFIG_PPC64 - .get_irq = mpic_get_irq, -#else .get_irq = mpic_get_coreint_irq, -#endif .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 1e121088826f..587a2828b06c 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -43,6 +43,7 @@ static irqreturn_t timebase_interrupt(int irq, void *dev) static struct irqaction tbint_irqaction = { .handler = timebase_interrupt, + .flags = IRQF_NO_THREAD, .name = "tbint", }; @@ -218,19 +219,12 @@ void mpc8xx_restart(char *cmd) static void cpm_cascade(unsigned int irq, struct irq_desc *desc) { - struct irq_chip *chip; - int cascade_irq; - - if ((cascade_irq = cpm_get_irq()) >= 0) { - struct irq_desc *cdesc = irq_to_desc(cascade_irq); + struct irq_chip *chip = irq_desc_get_chip(desc); + int cascade_irq = cpm_get_irq(); + if (cascade_irq >= 0) generic_handle_irq(cascade_irq); - chip = irq_desc_get_chip(cdesc); - chip->irq_eoi(&cdesc->irq_data); - } - - chip = irq_desc_get_chip(desc); chip->irq_eoi(&desc->irq_data); } diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index b62aab3e22ec..d703775bda30 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -86,6 +86,27 @@ config MPIC bool default n +config MPIC_TIMER + bool "MPIC Global Timer" + depends on MPIC && FSL_SOC + default n + help + The MPIC global timer is a hardware timer inside the + Freescale PIC complying with OpenPIC standard. When the + specified interval times out, the hardware timer generates + an interrupt. The driver currently is only tested on fsl + chip, but it can potentially support other global timers + complying with the OpenPIC standard. + +config FSL_MPIC_TIMER_WAKEUP + tristate "Freescale MPIC global timer wakeup driver" + depends on FSL_SOC && MPIC_TIMER && PM + default n + help + The driver provides a way to wake up the system by MPIC + timer. + e.g. "echo 5 > /sys/devices/system/mpic/timer_wakeup" + config PPC_EPAPR_HV_PIC bool default n @@ -164,6 +185,11 @@ config IBMEBUS help Bus device driver for GX bus based adapters. 
+config EEH + bool + depends on (PPC_POWERNV || PPC_PSERIES) && PCI + default y + config PPC_MPC106 bool default n @@ -193,37 +219,6 @@ config PPC_IO_WORKAROUNDS source "drivers/cpufreq/Kconfig" -menu "CPU Frequency drivers" - depends on CPU_FREQ - -config CPU_FREQ_PMAC - bool "Support for Apple PowerBooks" - depends on ADB_PMU && PPC32 - select CPU_FREQ_TABLE - help - This adds support for frequency switching on Apple PowerBooks, - this currently includes some models of iBook & Titanium - PowerBook. - -config CPU_FREQ_PMAC64 - bool "Support for some Apple G5s" - depends on PPC_PMAC && PPC64 - select CPU_FREQ_TABLE - help - This adds support for frequency switching on Apple iMac G5, - and some of the more recent desktop G5 machines as well. - -config PPC_PASEMI_CPUFREQ - bool "Support for PA Semi PWRficient" - depends on PPC_PASEMI - default y - select CPU_FREQ_TABLE - help - This adds the support for frequency switching on PA Semi - PWRficient processors. - -endmenu - menu "CPUIdle driver" source "drivers/cpuidle/Kconfig" diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 7819c40a6bc3..47d9a03dd415 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -71,6 +71,7 @@ config PPC_BOOK3S_64 select PPC_FPU select PPC_HAVE_PMU_SUPPORT select SYS_SUPPORTS_HUGETLBFS + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES config PPC_BOOK3E_64 bool "Embedded processors" diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c index 246e1d8b3af3..c34ee4e60873 100644 --- a/arch/powerpc/platforms/cell/beat_htab.c +++ b/arch/powerpc/platforms/cell/beat_htab.c @@ -185,7 +185,8 @@ static void beat_lpar_hptab_clear(void) static long beat_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, - int psize, int ssize, int local) + int psize, int apsize, + int ssize, int local) { unsigned long lpar_rc; u64 dummy0, dummy1; @@ -274,7 +275,8 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, } static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, - int psize, int ssize, int local) + int psize, int apsize, + int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; @@ -364,9 +366,10 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, * already zero. For now I am paranoid. 
*/ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, - unsigned long newpp, - unsigned long vpn, - int psize, int ssize, int local) + unsigned long newpp, + unsigned long vpn, + int psize, int apsize, + int ssize, int local) { unsigned long lpar_rc; unsigned long want_v; @@ -394,7 +397,8 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, } static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn, - int psize, int ssize, int local) + int psize, int apsize, + int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 8c6dc42ecf65..9e5dfbcc00af 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -239,7 +239,7 @@ void __init beatic_init_IRQ(void) ppc_md.get_irq = beatic_get_irq; /* Allocate an irq host */ - beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL); + beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL); BUG_ON(beatic_host == NULL); irq_set_default_host(beatic_host); } diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index d35dbbc8ec79..f75f6fcac729 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -142,7 +142,7 @@ static int smp_cell_cpu_bootable(unsigned int nr) * during boot if the user requests it. Odd-numbered * cpus are assumed to be secondary threads. */ - if (system_state < SYSTEM_RUNNING && + if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT) && !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) return 0; diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile index ce6d789e0741..8e8d4cae5ebe 100644 --- a/arch/powerpc/platforms/pasemi/Makefile +++ b/arch/powerpc/platforms/pasemi/Makefile @@ -1,3 +1,2 @@ obj-y += setup.o pci.o time.o idle.o powersave.o iommu.o dma_lib.o misc.o obj-$(CONFIG_PPC_PASEMI_MDIO) += gpio_mdio.o -obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += cpufreq.o diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c deleted file mode 100644 index be1e7958909e..000000000000 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2007 PA Semi, Inc - * - * Authors: Egor Martovetsky <egor@pasemi.com> - * Olof Johansson <olof@lixom.net> - * - * Maintained by: Olof Johansson <olof@lixom.net> - * - * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c: - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - */ - -#include <linux/cpufreq.h> -#include <linux/timer.h> -#include <linux/module.h> - -#include <asm/hw_irq.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/time.h> -#include <asm/smp.h> - -#define SDCASR_REG 0x0100 -#define SDCASR_REG_STRIDE 0x1000 -#define SDCPWR_CFGA0_REG 0x0100 -#define SDCPWR_PWST0_REG 0x0000 -#define SDCPWR_GIZTIME_REG 0x0440 - -/* SDCPWR_GIZTIME_REG fields */ -#define SDCPWR_GIZTIME_GR 0x80000000 -#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff - -/* Offset of ASR registers from SDC base */ -#define SDCASR_OFFSET 0x120000 - -static void __iomem *sdcpwr_mapbase; -static void __iomem *sdcasr_mapbase; - -static DEFINE_MUTEX(pas_switch_mutex); - -/* Current astate, is used when waking up from power savings on - * one core, in case the other core has switched states during - * the idle time. - */ -static int current_astate; - -/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */ -static struct cpufreq_frequency_table pas_freqs[] = { - {0, 0}, - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr *pas_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -/* - * hardware specific functions - */ - -static int get_astate_freq(int astate) -{ - u32 ret; - ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10)); - - return ret & 0x3f; -} - -static int get_cur_astate(int cpu) -{ - u32 ret; - - ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG); - ret = (ret >> (cpu * 4)) & 0x7; - - return ret; -} - -static int get_gizmo_latency(void) -{ - u32 giztime, ret; - - giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG); - - /* just provide the upper bound */ - if (giztime & SDCPWR_GIZTIME_GR) - ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000; - else - ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000; - - return ret; -} - -static void set_astate(int cpu, unsigned int astate) -{ - unsigned long flags; - - /* Return if called before init has run */ - if (unlikely(!sdcasr_mapbase)) - return; - - local_irq_save(flags); - - out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate); - - local_irq_restore(flags); -} - -int check_astate(void) -{ - return get_cur_astate(hard_smp_processor_id()); -} - -void restore_astate(int cpu) -{ - set_astate(cpu, current_astate); -} - -/* - * cpufreq functions - */ - -static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - const u32 *max_freqp; - u32 max_freq; - int i, cur_astate; - struct resource res; - struct device_node *cpu, *dn; - int err = -ENODEV; - - cpu = of_get_cpu_node(policy->cpu, NULL); - - if (!cpu) - goto out; - - dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); - if (!dn) - dn = of_find_compatible_node(NULL, NULL, - "pasemi,pwrficient-sdc"); - if (!dn) - goto out; - err = of_address_to_resource(dn, 0, &res); - of_node_put(dn); - if (err) - goto out; - sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000); - if (!sdcasr_mapbase) { - err = -EINVAL; - goto out; - } - - dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo"); - if (!dn) - dn = of_find_compatible_node(NULL, NULL, - "pasemi,pwrficient-gizmo"); - if (!dn) { - err = -ENODEV; - goto out_unmap_sdcasr; - } - err = of_address_to_resource(dn, 0, &res); - of_node_put(dn); - if (err) - goto out_unmap_sdcasr; - sdcpwr_mapbase = ioremap(res.start, 0x1000); - if (!sdcpwr_mapbase) { - err = -EINVAL; - goto out_unmap_sdcasr; - } - - pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); 
- if (!max_freqp) { - err = -EINVAL; - goto out_unmap_sdcpwr; - } - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - - pr_debug("max clock-frequency is at %u kHz\n", max_freq); - pr_debug("initializing frequency table\n"); - - /* initialize frequency table */ - for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - pas_freqs[i].frequency = get_astate_freq(pas_freqs[i].index) * 100000; - pr_debug("%d: %d\n", i, pas_freqs[i].frequency); - } - - policy->cpuinfo.transition_latency = get_gizmo_latency(); - - cur_astate = get_cur_astate(policy->cpu); - pr_debug("current astate is at %d\n",cur_astate); - - policy->cur = pas_freqs[cur_astate].frequency; - cpumask_copy(policy->cpus, cpu_online_mask); - - ppc_proc_freq = policy->cur * 1000ul; - - cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu); - - /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max - * are set correctly - */ - return cpufreq_frequency_table_cpuinfo(policy, pas_freqs); - -out_unmap_sdcpwr: - iounmap(sdcpwr_mapbase); - -out_unmap_sdcasr: - iounmap(sdcasr_mapbase); -out: - return err; -} - -static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - /* - * We don't support CPU hotplug. Don't unmap after the system - * has already made it to a running state. - */ - if (system_state != SYSTEM_BOOTING) - return 0; - - if (sdcasr_mapbase) - iounmap(sdcasr_mapbase); - if (sdcpwr_mapbase) - iounmap(sdcpwr_mapbase); - - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static int pas_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pas_freqs); -} - -static int pas_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_freqs freqs; - int pas_astate_new; - int i; - - cpufreq_frequency_table_target(policy, - pas_freqs, - target_freq, - relation, - &pas_astate_new); - - freqs.old = policy->cur; - freqs.new = pas_freqs[pas_astate_new].frequency; - - mutex_lock(&pas_switch_mutex); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", - policy->cpu, - pas_freqs[pas_astate_new].frequency, - pas_freqs[pas_astate_new].index); - - current_astate = pas_astate_new; - - for_each_online_cpu(i) - set_astate(i, pas_astate_new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&pas_switch_mutex); - - ppc_proc_freq = freqs.new * 1000ul; - return 0; -} - -static struct cpufreq_driver pas_cpufreq_driver = { - .name = "pas-cpufreq", - .owner = THIS_MODULE, - .flags = CPUFREQ_CONST_LOOPS, - .init = pas_cpufreq_cpu_init, - .exit = pas_cpufreq_cpu_exit, - .verify = pas_cpufreq_verify, - .target = pas_cpufreq_target, - .attr = pas_cpu_freqs_attr, -}; - -/* - * module init and destoy - */ - -static int __init pas_cpufreq_init(void) -{ - if (!of_machine_is_compatible("PA6T-1682M") && - !of_machine_is_compatible("pasemi,pwrficient")) - return -ENODEV; - - return cpufreq_register_driver(&pas_cpufreq_driver); -} - -static void __exit pas_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&pas_cpufreq_driver); -} - -module_init(pas_cpufreq_init); -module_exit(pas_cpufreq_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>"); diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index ea47df66fee5..52c6ce1cc985 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ 
b/arch/powerpc/platforms/powermac/Makefile @@ -9,8 +9,6 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ sleep.o low_i2c.o cache.o pfunc_core.o \ pfunc_base.o udbg_scc.o udbg_adb.o obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o -obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o -obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o # CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really # CONFIG_NVRAM=y diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c deleted file mode 100644 index 3104fad82480..000000000000 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ /dev/null @@ -1,721 +0,0 @@ -/* - * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> - * Copyright (C) 2004 John Steele Scott <toojays@toojays.net> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * TODO: Need a big cleanup here. Basically, we need to have different - * cpufreq_driver structures for the different type of HW instead of the - * current mess. We also need to better deal with the detection of the - * type of machine. - * - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/adb.h> -#include <linux/pmu.h> -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/device.h> -#include <linux/hardirq.h> -#include <asm/prom.h> -#include <asm/machdep.h> -#include <asm/irq.h> -#include <asm/pmac_feature.h> -#include <asm/mmu_context.h> -#include <asm/sections.h> -#include <asm/cputable.h> -#include <asm/time.h> -#include <asm/mpic.h> -#include <asm/keylargo.h> -#include <asm/switch_to.h> - -/* WARNING !!! This will cause calibrate_delay() to be called, - * but this is an __init function ! So you MUST go edit - * init/main.c to make it non-init before enabling DEBUG_FREQ - */ -#undef DEBUG_FREQ - -extern void low_choose_7447a_dfs(int dfs); -extern void low_choose_750fx_pll(int pll); -extern void low_sleep_handler(void); - -/* - * Currently, PowerMac cpufreq supports only high & low frequencies - * that are set by the firmware - */ -static unsigned int low_freq; -static unsigned int hi_freq; -static unsigned int cur_freq; -static unsigned int sleep_freq; -static unsigned long transition_latency; - -/* - * Different models uses different mechanisms to switch the frequency - */ -static int (*set_speed_proc)(int low_speed); -static unsigned int (*get_speed_proc)(void); - -/* - * Some definitions used by the various speedprocs - */ -static u32 voltage_gpio; -static u32 frequency_gpio; -static u32 slew_done_gpio; -static int no_schedule; -static int has_cpu_l2lve; -static int is_pmu_based; - -/* There are only two frequency states for each processor. Values - * are in kHz for the time being. 
- */ -#define CPUFREQ_HIGH 0 -#define CPUFREQ_LOW 1 - -static struct cpufreq_frequency_table pmac_cpu_freqs[] = { - {CPUFREQ_HIGH, 0}, - {CPUFREQ_LOW, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr* pmac_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static inline void local_delay(unsigned long ms) -{ - if (no_schedule) - mdelay(ms); - else - msleep(ms); -} - -#ifdef DEBUG_FREQ -static inline void debug_calc_bogomips(void) -{ - /* This will cause a recalc of bogomips and display the - * result. We backup/restore the value to avoid affecting the - * core cpufreq framework's own calculation. - */ - unsigned long save_lpj = loops_per_jiffy; - calibrate_delay(); - loops_per_jiffy = save_lpj; -} -#endif /* DEBUG_FREQ */ - -/* Switch CPU speed under 750FX CPU control - */ -static int cpu_750fx_cpu_speed(int low_speed) -{ - u32 hid2; - - if (low_speed == 0) { - /* ramping up, set voltage first */ - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); - /* Make sure we sleep for at least 1ms */ - local_delay(10); - - /* tweak L2 for high voltage */ - if (has_cpu_l2lve) { - hid2 = mfspr(SPRN_HID2); - hid2 &= ~0x2000; - mtspr(SPRN_HID2, hid2); - } - } -#ifdef CONFIG_6xx - low_choose_750fx_pll(low_speed); -#endif - if (low_speed == 1) { - /* tweak L2 for low voltage */ - if (has_cpu_l2lve) { - hid2 = mfspr(SPRN_HID2); - hid2 |= 0x2000; - mtspr(SPRN_HID2, hid2); - } - - /* ramping down, set voltage last */ - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); - local_delay(10); - } - - return 0; -} - -static unsigned int cpu_750fx_get_cpu_speed(void) -{ - if (mfspr(SPRN_HID1) & HID1_PS) - return low_freq; - else - return hi_freq; -} - -/* Switch CPU speed using DFS */ -static int dfs_set_cpu_speed(int low_speed) -{ - if (low_speed == 0) { - /* ramping up, set voltage first */ - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); - /* Make sure we sleep for at least 1ms */ - local_delay(1); - } - - /* set frequency */ -#ifdef CONFIG_6xx - low_choose_7447a_dfs(low_speed); -#endif - udelay(100); - - if (low_speed == 1) { - /* ramping down, set voltage last */ - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); - local_delay(1); - } - - return 0; -} - -static unsigned int dfs_get_cpu_speed(void) -{ - if (mfspr(SPRN_HID1) & HID1_DFS) - return low_freq; - else - return hi_freq; -} - - -/* Switch CPU speed using slewing GPIOs - */ -static int gpios_set_cpu_speed(int low_speed) -{ - int gpio, timeout = 0; - - /* If ramping up, set voltage first */ - if (low_speed == 0) { - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); - /* Delay is way too big but it's ok, we schedule */ - local_delay(10); - } - - /* Set frequency */ - gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0); - if (low_speed == ((gpio & 0x01) == 0)) - goto skip; - - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio, - low_speed ? 
0x04 : 0x05); - udelay(200); - do { - if (++timeout > 100) - break; - local_delay(1); - gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0); - } while((gpio & 0x02) == 0); - skip: - /* If ramping down, set voltage last */ - if (low_speed == 1) { - pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); - /* Delay is way too big but it's ok, we schedule */ - local_delay(10); - } - -#ifdef DEBUG_FREQ - debug_calc_bogomips(); -#endif - - return 0; -} - -/* Switch CPU speed under PMU control - */ -static int pmu_set_cpu_speed(int low_speed) -{ - struct adb_request req; - unsigned long save_l2cr; - unsigned long save_l3cr; - unsigned int pic_prio; - unsigned long flags; - - preempt_disable(); - -#ifdef DEBUG_FREQ - printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1)); -#endif - pmu_suspend(); - - /* Disable all interrupt sources on openpic */ - pic_prio = mpic_cpu_get_priority(); - mpic_cpu_set_priority(0xf); - - /* Make sure the decrementer won't interrupt us */ - asm volatile("mtdec %0" : : "r" (0x7fffffff)); - /* Make sure any pending DEC interrupt occurring while we did - * the above didn't re-enable the DEC */ - mb(); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); - - /* We can now disable MSR_EE */ - local_irq_save(flags); - - /* Giveup the FPU & vec */ - enable_kernel_fp(); - -#ifdef CONFIG_ALTIVEC - if (cpu_has_feature(CPU_FTR_ALTIVEC)) - enable_kernel_altivec(); -#endif /* CONFIG_ALTIVEC */ - - /* Save & disable L2 and L3 caches */ - save_l3cr = _get_L3CR(); /* (returns -1 if not available) */ - save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ - - /* Send the new speed command. My assumption is that this command - * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep - */ - pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed); - while (!req.complete) - pmu_poll(); - - /* Prepare the northbridge for the speed transition */ - pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1); - - /* Call low level code to backup CPU state and recover from - * hardware reset - */ - low_sleep_handler(); - - /* Restore the northbridge */ - pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0); - - /* Restore L2 cache */ - if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) - _set_L2CR(save_l2cr); - /* Restore L3 cache */ - if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0) - _set_L3CR(save_l3cr); - - /* Restore userland MMU context */ - switch_mmu_context(NULL, current->active_mm); - -#ifdef DEBUG_FREQ - printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1)); -#endif - - /* Restore low level PMU operations */ - pmu_unlock(); - - /* - * Restore decrementer; we'll take a decrementer interrupt - * as soon as interrupts are re-enabled and the generic - * clockevents code will reprogram it with the right value. - */ - set_dec(1); - - /* Restore interrupts */ - mpic_cpu_set_priority(pic_prio); - - /* Let interrupts flow again ... */ - local_irq_restore(flags); - -#ifdef DEBUG_FREQ - debug_calc_bogomips(); -#endif - - pmu_resume(); - - preempt_enable(); - - return 0; -} - -static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, - int notify) -{ - struct cpufreq_freqs freqs; - unsigned long l3cr; - static unsigned long prev_l3cr; - - freqs.old = cur_freq; - freqs.new = (speed_mode == CPUFREQ_HIGH) ? 
hi_freq : low_freq; - - if (freqs.old == freqs.new) - return 0; - - if (notify) - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - if (speed_mode == CPUFREQ_LOW && - cpu_has_feature(CPU_FTR_L3CR)) { - l3cr = _get_L3CR(); - if (l3cr & L3CR_L3E) { - prev_l3cr = l3cr; - _set_L3CR(0); - } - } - set_speed_proc(speed_mode == CPUFREQ_LOW); - if (speed_mode == CPUFREQ_HIGH && - cpu_has_feature(CPU_FTR_L3CR)) { - l3cr = _get_L3CR(); - if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) - _set_L3CR(prev_l3cr); - } - if (notify) - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; - - return 0; -} - -static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) -{ - return cur_freq; -} - -static int pmac_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); -} - -static int pmac_cpufreq_target( struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - int rc; - - if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs, - target_freq, relation, &newstate)) - return -EINVAL; - - rc = do_set_cpu_speed(policy, newstate, 1); - - ppc_proc_freq = cur_freq * 1000ul; - return rc; -} - -static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -ENODEV; - - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = cur_freq; - - cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); - return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); -} - -static u32 read_gpio(struct device_node *np) -{ - const u32 *reg = of_get_property(np, "reg", NULL); - u32 offset; - - if (reg == NULL) - return 0; - /* That works for all keylargos but shall be fixed properly - * some day... The problem is that it seems we can't rely - * on the "reg" property of the GPIO nodes, they are either - * relative to the base of KeyLargo or to the base of the - * GPIO space, and the device-tree doesn't help. - */ - offset = *reg; - if (offset < KEYLARGO_GPIO_LEVELS0) - offset += KEYLARGO_GPIO_LEVELS0; - return offset; -} - -static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) -{ - /* Ok, this could be made a bit smarter, but let's be robust for now. We - * always force a speed change to high speed before sleep, to make sure - * we have appropriate voltage and/or bus speed for the wakeup process, - * and to make sure our loops_per_jiffies are "good enough", that is will - * not cause too short delays if we sleep in low speed and wake in high - * speed.. - */ - no_schedule = 1; - sleep_freq = cur_freq; - if (cur_freq == low_freq && !is_pmu_based) - do_set_cpu_speed(policy, CPUFREQ_HIGH, 0); - return 0; -} - -static int pmac_cpufreq_resume(struct cpufreq_policy *policy) -{ - /* If we resume, first check if we have a get() function */ - if (get_speed_proc) - cur_freq = get_speed_proc(); - else - cur_freq = 0; - - /* We don't, hrm... we don't really know our speed here, best - * is that we force a switch to whatever it was, which is - * probably high speed due to our suspend() routine - */ - do_set_cpu_speed(policy, sleep_freq == low_freq ? 
- CPUFREQ_LOW : CPUFREQ_HIGH, 0); - - ppc_proc_freq = cur_freq * 1000ul; - - no_schedule = 0; - return 0; -} - -static struct cpufreq_driver pmac_cpufreq_driver = { - .verify = pmac_cpufreq_verify, - .target = pmac_cpufreq_target, - .get = pmac_cpufreq_get_speed, - .init = pmac_cpufreq_cpu_init, - .suspend = pmac_cpufreq_suspend, - .resume = pmac_cpufreq_resume, - .flags = CPUFREQ_PM_NO_WARN, - .attr = pmac_cpu_freqs_attr, - .name = "powermac", - .owner = THIS_MODULE, -}; - - -static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) -{ - struct device_node *volt_gpio_np = of_find_node_by_name(NULL, - "voltage-gpio"); - struct device_node *freq_gpio_np = of_find_node_by_name(NULL, - "frequency-gpio"); - struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL, - "slewing-done"); - const u32 *value; - - /* - * Check to see if it's GPIO driven or PMU only - * - * The way we extract the GPIO address is slightly hackish, but it - * works well enough for now. We need to abstract the whole GPIO - * stuff sooner or later anyway - */ - - if (volt_gpio_np) - voltage_gpio = read_gpio(volt_gpio_np); - if (freq_gpio_np) - frequency_gpio = read_gpio(freq_gpio_np); - if (slew_done_gpio_np) - slew_done_gpio = read_gpio(slew_done_gpio_np); - - /* If we use the frequency GPIOs, calculate the min/max speeds based - * on the bus frequencies - */ - if (frequency_gpio && slew_done_gpio) { - int lenp, rc; - const u32 *freqs, *ratio; - - freqs = of_get_property(cpunode, "bus-frequencies", &lenp); - lenp /= sizeof(u32); - if (freqs == NULL || lenp != 2) { - printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n"); - return 1; - } - ratio = of_get_property(cpunode, "processor-to-bus-ratio*2", - NULL); - if (ratio == NULL) { - printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n"); - return 1; - } - - /* Get the min/max bus frequencies */ - low_freq = min(freqs[0], freqs[1]); - hi_freq = max(freqs[0], freqs[1]); - - /* Grrrr.. It _seems_ that the device-tree is lying on the low bus - * frequency, it claims it to be around 84Mhz on some models while - * it appears to be approx. 101Mhz on all. Let's hack around here... - * fortunately, we don't need to be too precise - */ - if (low_freq < 98000000) - low_freq = 101000000; - - /* Convert those to CPU core clocks */ - low_freq = (low_freq * (*ratio)) / 2000; - hi_freq = (hi_freq * (*ratio)) / 2000; - - /* Now we get the frequencies, we read the GPIO to see what is out current - * speed - */ - rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0); - cur_freq = (rc & 0x01) ? 
hi_freq : low_freq; - - set_speed_proc = gpios_set_cpu_speed; - return 1; - } - - /* If we use the PMU, look for the min & max frequencies in the - * device-tree - */ - value = of_get_property(cpunode, "min-clock-frequency", NULL); - if (!value) - return 1; - low_freq = (*value) / 1000; - /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree - * here */ - if (low_freq < 100000) - low_freq *= 10; - - value = of_get_property(cpunode, "max-clock-frequency", NULL); - if (!value) - return 1; - hi_freq = (*value) / 1000; - set_speed_proc = pmu_set_cpu_speed; - is_pmu_based = 1; - - return 0; -} - -static int pmac_cpufreq_init_7447A(struct device_node *cpunode) -{ - struct device_node *volt_gpio_np; - - if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL) - return 1; - - volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); - if (volt_gpio_np) - voltage_gpio = read_gpio(volt_gpio_np); - if (!voltage_gpio){ - printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n"); - return 1; - } - - /* OF only reports the high frequency */ - hi_freq = cur_freq; - low_freq = cur_freq/2; - - /* Read actual frequency from CPU */ - cur_freq = dfs_get_cpu_speed(); - set_speed_proc = dfs_set_cpu_speed; - get_speed_proc = dfs_get_cpu_speed; - - return 0; -} - -static int pmac_cpufreq_init_750FX(struct device_node *cpunode) -{ - struct device_node *volt_gpio_np; - u32 pvr; - const u32 *value; - - if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL) - return 1; - - hi_freq = cur_freq; - value = of_get_property(cpunode, "reduced-clock-frequency", NULL); - if (!value) - return 1; - low_freq = (*value) / 1000; - - volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); - if (volt_gpio_np) - voltage_gpio = read_gpio(volt_gpio_np); - - pvr = mfspr(SPRN_PVR); - has_cpu_l2lve = !((pvr & 0xf00) == 0x100); - - set_speed_proc = cpu_750fx_cpu_speed; - get_speed_proc = cpu_750fx_get_cpu_speed; - cur_freq = cpu_750fx_get_cpu_speed(); - - return 0; -} - -/* Currently, we support the following machines: - * - * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz) - * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz) - * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz) - * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz) - * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz) - * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage) - * - Recent MacRISC3 laptops - * - All new machines with 7447A CPUs - */ -static int __init pmac_cpufreq_setup(void) -{ - struct device_node *cpunode; - const u32 *value; - - if (strstr(cmd_line, "nocpufreq")) - return 0; - - /* Assume only one CPU */ - cpunode = of_find_node_by_type(NULL, "cpu"); - if (!cpunode) - goto out; - - /* Get current cpu clock freq */ - value = of_get_property(cpunode, "clock-frequency", NULL); - if (!value) - goto out; - cur_freq = (*value) / 1000; - transition_latency = CPUFREQ_ETERNAL; - - /* Check for 7447A based MacRISC3 */ - if (of_machine_is_compatible("MacRISC3") && - of_get_property(cpunode, "dynamic-power-step", NULL) && - PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { - pmac_cpufreq_init_7447A(cpunode); - transition_latency = 8000000; - /* Check for other MacRISC3 machines */ - } else if (of_machine_is_compatible("PowerBook3,4") || - of_machine_is_compatible("PowerBook3,5") || - of_machine_is_compatible("MacRISC3")) { - pmac_cpufreq_init_MacRISC3(cpunode); - /* Else check for iBook2 500/600 */ - } else if (of_machine_is_compatible("PowerBook4,1")) { - hi_freq = cur_freq; - low_freq = 400000; - 
set_speed_proc = pmu_set_cpu_speed; - is_pmu_based = 1; - } - /* Else check for TiPb 550 */ - else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) { - hi_freq = cur_freq; - low_freq = 500000; - set_speed_proc = pmu_set_cpu_speed; - is_pmu_based = 1; - } - /* Else check for TiPb 400 & 500 */ - else if (of_machine_is_compatible("PowerBook3,2")) { - /* We only know about the 400 MHz and the 500Mhz model - * they both have 300 MHz as low frequency - */ - if (cur_freq < 350000 || cur_freq > 550000) - goto out; - hi_freq = cur_freq; - low_freq = 300000; - set_speed_proc = pmu_set_cpu_speed; - is_pmu_based = 1; - } - /* Else check for 750FX */ - else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000) - pmac_cpufreq_init_750FX(cpunode); -out: - of_node_put(cpunode); - if (set_speed_proc == NULL) - return -ENODEV; - - pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq; - pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq; - ppc_proc_freq = cur_freq * 1000ul; - - printk(KERN_INFO "Registering PowerMac CPU frequency driver\n"); - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n", - low_freq/1000, hi_freq/1000, cur_freq/1000); - - return cpufreq_register_driver(&pmac_cpufreq_driver); -} - -module_init(pmac_cpufreq_setup); - diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c deleted file mode 100644 index 7ba423431cfe..000000000000 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ /dev/null @@ -1,746 +0,0 @@ -/* - * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> - * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, - * that is iMac G5 and latest single CPU desktop. - */ - -#undef DEBUG - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/completion.h> -#include <linux/mutex.h> -#include <asm/prom.h> -#include <asm/machdep.h> -#include <asm/irq.h> -#include <asm/sections.h> -#include <asm/cputable.h> -#include <asm/time.h> -#include <asm/smu.h> -#include <asm/pmac_pfunc.h> - -#define DBG(fmt...) 
pr_debug(fmt) - -/* see 970FX user manual */ - -#define SCOM_PCR 0x0aa001 /* PCR scom addr */ - -#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ -#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ -#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ -#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ -#define PCR_SPEED_MASK 0x000e0000U /* speed mask */ -#define PCR_SPEED_SHIFT 17 -#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ -#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ -#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ -#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ -#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ -#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ - -#define SCOM_PSR 0x408001 /* PSR scom addr */ -/* warning: PSR is a 64 bits register */ -#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ -#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ -#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */ -#define PSR_CUR_SPEED_SHIFT (56) - -/* - * The G5 only supports two frequencies (Quarter speed is not supported) - */ -#define CPUFREQ_HIGH 0 -#define CPUFREQ_LOW 1 - -static struct cpufreq_frequency_table g5_cpu_freqs[] = { - {CPUFREQ_HIGH, 0}, - {CPUFREQ_LOW, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr* g5_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -/* Power mode data is an array of the 32 bits PCR values to use for - * the various frequencies, retrieved from the device-tree - */ -static int g5_pmode_cur; - -static void (*g5_switch_volt)(int speed_mode); -static int (*g5_switch_freq)(int speed_mode); -static int (*g5_query_freq)(void); - -static DEFINE_MUTEX(g5_switch_mutex); - -static unsigned long transition_latency; - -#ifdef CONFIG_PMAC_SMU - -static const u32 *g5_pmode_data; -static int g5_pmode_max; - -static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ -static int g5_fvt_count; /* number of op. points */ -static int g5_fvt_cur; /* current op. point */ - -/* - * SMU based voltage switching for Neo2 platforms - */ - -static void g5_smu_switch_volt(int speed_mode) -{ - struct smu_simple_cmd cmd; - - DECLARE_COMPLETION_ONSTACK(comp); - smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete, - &comp, 'V', 'S', 'L', 'E', 'W', - 0xff, g5_fvt_cur+1, speed_mode); - wait_for_completion(&comp); -} - -/* - * Platform function based voltage/vdnap switching for Neo2 - */ - -static struct pmf_function *pfunc_set_vdnap0; -static struct pmf_function *pfunc_vdnap0_complete; - -static void g5_vdnap_switch_volt(int speed_mode) -{ - struct pmf_args args; - u32 slew, done = 0; - unsigned long timeout; - - slew = (speed_mode == CPUFREQ_LOW) ? 
1 : 0; - args.count = 1; - args.u[0].p = &slew; - - pmf_call_one(pfunc_set_vdnap0, &args); - - /* It's an irq GPIO so we should be able to just block here, - * I'll do that later after I've properly tested the IRQ code for - * platform functions - */ - timeout = jiffies + HZ/10; - while(!time_after(jiffies, timeout)) { - args.count = 1; - args.u[0].p = &done; - pmf_call_one(pfunc_vdnap0_complete, &args); - if (done) - break; - msleep(1); - } - if (done == 0) - printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); -} - - -/* - * SCOM based frequency switching for 970FX rev3 - */ -static int g5_scom_switch_freq(int speed_mode) -{ - unsigned long flags; - int to; - - /* If frequency is going up, first ramp up the voltage */ - if (speed_mode < g5_pmode_cur) - g5_switch_volt(speed_mode); - - local_irq_save(flags); - - /* Clear PCR high */ - scom970_write(SCOM_PCR, 0); - /* Clear PCR low */ - scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); - /* Set PCR low */ - scom970_write(SCOM_PCR, PCR_HILO_SELECT | - g5_pmode_data[speed_mode]); - - /* Wait for completion */ - for (to = 0; to < 10; to++) { - unsigned long psr = scom970_read(SCOM_PSR); - - if ((psr & PSR_CMD_RECEIVED) == 0 && - (((psr >> PSR_CUR_SPEED_SHIFT) ^ - (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) - == 0) - break; - if (psr & PSR_CMD_COMPLETED) - break; - udelay(100); - } - - local_irq_restore(flags); - - /* If frequency is going down, last ramp the voltage */ - if (speed_mode > g5_pmode_cur) - g5_switch_volt(speed_mode); - - g5_pmode_cur = speed_mode; - ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; - - return 0; -} - -static int g5_scom_query_freq(void) -{ - unsigned long psr = scom970_read(SCOM_PSR); - int i; - - for (i = 0; i <= g5_pmode_max; i++) - if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ - (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) - break; - return i; -} - -/* - * Fake voltage switching for platforms with missing support - */ - -static void g5_dummy_switch_volt(int speed_mode) -{ -} - -#endif /* CONFIG_PMAC_SMU */ - -/* - * Platform function based voltage switching for PowerMac7,2 & 7,3 - */ - -static struct pmf_function *pfunc_cpu0_volt_high; -static struct pmf_function *pfunc_cpu0_volt_low; -static struct pmf_function *pfunc_cpu1_volt_high; -static struct pmf_function *pfunc_cpu1_volt_low; - -static void g5_pfunc_switch_volt(int speed_mode) -{ - if (speed_mode == CPUFREQ_HIGH) { - if (pfunc_cpu0_volt_high) - pmf_call_one(pfunc_cpu0_volt_high, NULL); - if (pfunc_cpu1_volt_high) - pmf_call_one(pfunc_cpu1_volt_high, NULL); - } else { - if (pfunc_cpu0_volt_low) - pmf_call_one(pfunc_cpu0_volt_low, NULL); - if (pfunc_cpu1_volt_low) - pmf_call_one(pfunc_cpu1_volt_low, NULL); - } - msleep(10); /* should be faster , to fix */ -} - -/* - * Platform function based frequency switching for PowerMac7,2 & 7,3 - */ - -static struct pmf_function *pfunc_cpu_setfreq_high; -static struct pmf_function *pfunc_cpu_setfreq_low; -static struct pmf_function *pfunc_cpu_getfreq; -static struct pmf_function *pfunc_slewing_done; - -static int g5_pfunc_switch_freq(int speed_mode) -{ - struct pmf_args args; - u32 done = 0; - unsigned long timeout; - int rc; - - DBG("g5_pfunc_switch_freq(%d)\n", speed_mode); - - /* If frequency is going up, first ramp up the voltage */ - if (speed_mode < g5_pmode_cur) - g5_switch_volt(speed_mode); - - /* Do it */ - if (speed_mode == CPUFREQ_HIGH) - rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL); - else - rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL); - - if (rc) - printk(KERN_WARNING 
"cpufreq: pfunc switch error %d\n", rc); - - /* It's an irq GPIO so we should be able to just block here, - * I'll do that later after I've properly tested the IRQ code for - * platform functions - */ - timeout = jiffies + HZ/10; - while(!time_after(jiffies, timeout)) { - args.count = 1; - args.u[0].p = &done; - pmf_call_one(pfunc_slewing_done, &args); - if (done) - break; - msleep(1); - } - if (done == 0) - printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); - - /* If frequency is going down, last ramp the voltage */ - if (speed_mode > g5_pmode_cur) - g5_switch_volt(speed_mode); - - g5_pmode_cur = speed_mode; - ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; - - return 0; -} - -static int g5_pfunc_query_freq(void) -{ - struct pmf_args args; - u32 val = 0; - - args.count = 1; - args.u[0].p = &val; - pmf_call_one(pfunc_cpu_getfreq, &args); - return val ? CPUFREQ_HIGH : CPUFREQ_LOW; -} - - -/* - * Common interface to the cpufreq core - */ - -static int g5_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, g5_cpu_freqs); -} - -static int g5_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - unsigned int newstate = 0; - struct cpufreq_freqs freqs; - int rc; - - if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, - target_freq, relation, &newstate)) - return -EINVAL; - - if (g5_pmode_cur == newstate) - return 0; - - mutex_lock(&g5_switch_mutex); - - freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; - freqs.new = g5_cpu_freqs[newstate].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - rc = g5_switch_freq(newstate); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - mutex_unlock(&g5_switch_mutex); - - return rc; -} - -static unsigned int g5_cpufreq_get_speed(unsigned int cpu) -{ - return g5_cpu_freqs[g5_pmode_cur].frequency; -} - -static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; - /* secondary CPUs are tied to the primary one by the - * cpufreq core if in the secondary policy we tell it that - * it actually must be one policy together with all others. 
*/ - cpumask_copy(policy->cpus, cpu_online_mask); - cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - g5_cpu_freqs); -} - - -static struct cpufreq_driver g5_cpufreq_driver = { - .name = "powermac", - .owner = THIS_MODULE, - .flags = CPUFREQ_CONST_LOOPS, - .init = g5_cpufreq_cpu_init, - .verify = g5_cpufreq_verify, - .target = g5_cpufreq_target, - .get = g5_cpufreq_get_speed, - .attr = g5_cpu_freqs_attr, -}; - - -#ifdef CONFIG_PMAC_SMU - -static int __init g5_neo2_cpufreq_init(struct device_node *cpus) -{ - struct device_node *cpunode; - unsigned int psize, ssize; - unsigned long max_freq; - char *freq_method, *volt_method; - const u32 *valp; - u32 pvr_hi; - int use_volts_vdnap = 0; - int use_volts_smu = 0; - int rc = -ENODEV; - - /* Check supported platforms */ - if (of_machine_is_compatible("PowerMac8,1") || - of_machine_is_compatible("PowerMac8,2") || - of_machine_is_compatible("PowerMac9,1")) - use_volts_smu = 1; - else if (of_machine_is_compatible("PowerMac11,2")) - use_volts_vdnap = 1; - else - return -ENODEV; - - /* Get first CPU node */ - for (cpunode = NULL; - (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) { - const u32 *reg = of_get_property(cpunode, "reg", NULL); - if (reg == NULL || (*reg) != 0) - continue; - if (!strcmp(cpunode->type, "cpu")) - break; - } - if (cpunode == NULL) { - printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n"); - return -ENODEV; - } - - /* Check 970FX for now */ - valp = of_get_property(cpunode, "cpu-version", NULL); - if (!valp) { - DBG("No cpu-version property !\n"); - goto bail_noprops; - } - pvr_hi = (*valp) >> 16; - if (pvr_hi != 0x3c && pvr_hi != 0x44) { - printk(KERN_ERR "cpufreq: Unsupported CPU version\n"); - goto bail_noprops; - } - - /* Look for the powertune data in the device-tree */ - g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize); - if (!g5_pmode_data) { - DBG("No power-mode-data !\n"); - goto bail_noprops; - } - g5_pmode_max = psize / sizeof(u32) - 1; - - if (use_volts_smu) { - const struct smu_sdbp_header *shdr; - - /* Look for the FVT table */ - shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); - if (!shdr) - goto bail_noprops; - g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; - ssize = (shdr->len * sizeof(u32)) - - sizeof(struct smu_sdbp_header); - g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt); - g5_fvt_cur = 0; - - /* Sanity checking */ - if (g5_fvt_count < 1 || g5_pmode_max < 1) - goto bail_noprops; - - g5_switch_volt = g5_smu_switch_volt; - volt_method = "SMU"; - } else if (use_volts_vdnap) { - struct device_node *root; - - root = of_find_node_by_path("/"); - if (root == NULL) { - printk(KERN_ERR "cpufreq: Can't find root of " - "device tree\n"); - goto bail_noprops; - } - pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); - pfunc_vdnap0_complete = - pmf_find_function(root, "slewing-done"); - if (pfunc_set_vdnap0 == NULL || - pfunc_vdnap0_complete == NULL) { - printk(KERN_ERR "cpufreq: Can't find required " - "platform function\n"); - goto bail_noprops; - } - - g5_switch_volt = g5_vdnap_switch_volt; - volt_method = "GPIO"; - } else { - g5_switch_volt = g5_dummy_switch_volt; - volt_method = "none"; - } - - /* - * From what I see, clock-frequency is always the maximal frequency. - * The current driver can not slew sysclk yet, so we really only deal - * with powertune steps for now. We also only implement full freq and - * half freq in this version. So far, I haven't yet seen a machine - * supporting anything else. 
- */ - valp = of_get_property(cpunode, "clock-frequency", NULL); - if (!valp) - return -ENODEV; - max_freq = (*valp)/1000; - g5_cpu_freqs[0].frequency = max_freq; - g5_cpu_freqs[1].frequency = max_freq/2; - - /* Set callbacks */ - transition_latency = 12000; - g5_switch_freq = g5_scom_switch_freq; - g5_query_freq = g5_scom_query_freq; - freq_method = "SCOM"; - - /* Force apply current frequency to make sure everything is in - * sync (voltage is right for example). Firmware may leave us with - * a strange setting ... - */ - g5_switch_volt(CPUFREQ_HIGH); - msleep(10); - g5_pmode_cur = -1; - g5_switch_freq(g5_query_freq()); - - printk(KERN_INFO "Registering G5 CPU frequency driver\n"); - printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n", - freq_method, volt_method); - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", - g5_cpu_freqs[1].frequency/1000, - g5_cpu_freqs[0].frequency/1000, - g5_cpu_freqs[g5_pmode_cur].frequency/1000); - - rc = cpufreq_register_driver(&g5_cpufreq_driver); - - /* We keep the CPU node on hold... hopefully, Apple G5 don't have - * hotplug CPU with a dynamic device-tree ... - */ - return rc; - - bail_noprops: - of_node_put(cpunode); - - return rc; -} - -#endif /* CONFIG_PMAC_SMU */ - - -static int __init g5_pm72_cpufreq_init(struct device_node *cpus) -{ - struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL; - const u8 *eeprom = NULL; - const u32 *valp; - u64 max_freq, min_freq, ih, il; - int has_volt = 1, rc = 0; - - DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and" - " RackMac3,1...\n"); - - /* Get first CPU node */ - for (cpunode = NULL; - (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) { - if (!strcmp(cpunode->type, "cpu")) - break; - } - if (cpunode == NULL) { - printk(KERN_ERR "cpufreq: Can't find any CPU node\n"); - return -ENODEV; - } - - /* Lookup the cpuid eeprom node */ - cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); - if (cpuid != NULL) - eeprom = of_get_property(cpuid, "cpuid", NULL); - if (eeprom == NULL) { - printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); - rc = -ENODEV; - goto bail; - } - - /* Lookup the i2c hwclock */ - for (hwclock = NULL; - (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){ - const char *loc = of_get_property(hwclock, - "hwctrl-location", NULL); - if (loc == NULL) - continue; - if (strcmp(loc, "CPU CLOCK")) - continue; - if (!of_get_property(hwclock, "platform-get-frequency", NULL)) - continue; - break; - } - if (hwclock == NULL) { - printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n"); - rc = -ENODEV; - goto bail; - } - - DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); - - /* Now get all the platform functions */ - pfunc_cpu_getfreq = - pmf_find_function(hwclock, "get-frequency"); - pfunc_cpu_setfreq_high = - pmf_find_function(hwclock, "set-frequency-high"); - pfunc_cpu_setfreq_low = - pmf_find_function(hwclock, "set-frequency-low"); - pfunc_slewing_done = - pmf_find_function(hwclock, "slewing-done"); - pfunc_cpu0_volt_high = - pmf_find_function(hwclock, "set-voltage-high-0"); - pfunc_cpu0_volt_low = - pmf_find_function(hwclock, "set-voltage-low-0"); - pfunc_cpu1_volt_high = - pmf_find_function(hwclock, "set-voltage-high-1"); - pfunc_cpu1_volt_low = - pmf_find_function(hwclock, "set-voltage-low-1"); - - /* Check we have minimum requirements */ - if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL || - pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) { - printk(KERN_ERR "cpufreq: Can't 
find platform functions !\n"); - rc = -ENODEV; - goto bail; - } - - /* Check that we have complete sets */ - if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) { - pmf_put_function(pfunc_cpu0_volt_high); - pmf_put_function(pfunc_cpu0_volt_low); - pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL; - has_volt = 0; - } - if (!has_volt || - pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) { - pmf_put_function(pfunc_cpu1_volt_high); - pmf_put_function(pfunc_cpu1_volt_low); - pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL; - } - - /* Note: The device tree also contains a "platform-set-values" - * function for which I haven't quite figured out the usage. It - * might have to be called on init and/or wakeup, I'm not too sure - * but things seem to work fine without it so far ... - */ - - /* Get max frequency from device-tree */ - valp = of_get_property(cpunode, "clock-frequency", NULL); - if (!valp) { - printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); - rc = -ENODEV; - goto bail; - } - - max_freq = (*valp)/1000; - - /* Now calculate reduced frequency by using the cpuid input freq - * ratio. This requires 64 bits math unless we are willing to lose - * some precision - */ - ih = *((u32 *)(eeprom + 0x10)); - il = *((u32 *)(eeprom + 0x20)); - - /* Check for machines with no useful settings */ - if (il == ih) { - printk(KERN_WARNING "cpufreq: No low frequency mode available" - " on this model !\n"); - rc = -ENODEV; - goto bail; - } - - min_freq = 0; - if (ih != 0 && il != 0) - min_freq = (max_freq * il) / ih; - - /* Sanity check */ - if (min_freq >= max_freq || min_freq < 1000) { - printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n"); - rc = -ENXIO; - goto bail; - } - g5_cpu_freqs[0].frequency = max_freq; - g5_cpu_freqs[1].frequency = min_freq; - - /* Set callbacks */ - transition_latency = CPUFREQ_ETERNAL; - g5_switch_volt = g5_pfunc_switch_volt; - g5_switch_freq = g5_pfunc_switch_freq; - g5_query_freq = g5_pfunc_query_freq; - - /* Force apply current frequency to make sure everything is in - * sync (voltage is right for example). Firmware may leave us with - * a strange setting ... - */ - g5_switch_volt(CPUFREQ_HIGH); - msleep(10); - g5_pmode_cur = -1; - g5_switch_freq(g5_query_freq()); - - printk(KERN_INFO "Registering G5 CPU frequency driver\n"); - printk(KERN_INFO "Frequency method: i2c/pfunc, " - "Voltage method: %s\n", has_volt ? 
"i2c/pfunc" : "none"); - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", - g5_cpu_freqs[1].frequency/1000, - g5_cpu_freqs[0].frequency/1000, - g5_cpu_freqs[g5_pmode_cur].frequency/1000); - - rc = cpufreq_register_driver(&g5_cpufreq_driver); - bail: - if (rc != 0) { - pmf_put_function(pfunc_cpu_getfreq); - pmf_put_function(pfunc_cpu_setfreq_high); - pmf_put_function(pfunc_cpu_setfreq_low); - pmf_put_function(pfunc_slewing_done); - pmf_put_function(pfunc_cpu0_volt_high); - pmf_put_function(pfunc_cpu0_volt_low); - pmf_put_function(pfunc_cpu1_volt_high); - pmf_put_function(pfunc_cpu1_volt_low); - } - of_node_put(hwclock); - of_node_put(cpuid); - of_node_put(cpunode); - - return rc; -} - -static int __init g5_cpufreq_init(void) -{ - struct device_node *cpus; - int rc = 0; - - cpus = of_find_node_by_path("/cpus"); - if (cpus == NULL) { - DBG("No /cpus node !\n"); - return -ENODEV; - } - - if (of_machine_is_compatible("PowerMac7,2") || - of_machine_is_compatible("PowerMac7,3") || - of_machine_is_compatible("RackMac3,1")) - rc = g5_pm72_cpufreq_init(cpus); -#ifdef CONFIG_PMAC_SMU - else - rc = g5_neo2_cpufreq_init(cpus); -#endif /* CONFIG_PMAC_SMU */ - - of_node_put(cpus); - return rc; -} - -module_init(g5_cpufreq_init); - - -MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index bdb738a69e41..5cbd4d67d5c4 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -192,7 +192,7 @@ static int psurge_secondary_ipi_init(void) { int rc = -ENOMEM; - psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL); + psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL); if (psurge_host) psurge_secondary_virq = irq_create_direct_mapping(psurge_host); @@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { +static struct notifier_block smp_core99_cpu_nb = { .notifier_call = smp_core99_cpu_notify, }; #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index bcc3cb48a44e..7fe595152478 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -3,3 +3,4 @@ obj-y += opal-rtc.o opal-nvram.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o +obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c new file mode 100644 index 000000000000..0cd1c4a71755 --- /dev/null +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -0,0 +1,916 @@ +/* + * The file intends to implement the functions needed by EEH, which is + * built on IODA compliant chip. Actually, lots of functions related + * to EEH would be built based on the OPAL APIs. + * + * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/bootmem.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/msi.h> +#include <linux/notifier.h> +#include <linux/pci.h> +#include <linux/string.h> + +#include <asm/eeh.h> +#include <asm/eeh_event.h> +#include <asm/io.h> +#include <asm/iommu.h> +#include <asm/msi_bitmap.h> +#include <asm/opal.h> +#include <asm/pci-bridge.h> +#include <asm/ppc-pci.h> +#include <asm/tce.h> + +#include "powernv.h" +#include "pci.h" + +/* Debugging option */ +#ifdef IODA_EEH_DBG_ON +#define IODA_EEH_DBG(args...) pr_info(args) +#else +#define IODA_EEH_DBG(args...) +#endif + +static char *hub_diag = NULL; +static int ioda_eeh_nb_init = 0; + +static int ioda_eeh_event(struct notifier_block *nb, + unsigned long events, void *change) +{ + uint64_t changed_evts = (uint64_t)change; + + /* We simply send special EEH event */ + if ((changed_evts & OPAL_EVENT_PCI_ERROR) && + (events & OPAL_EVENT_PCI_ERROR)) + eeh_send_failure_event(NULL); + + return 0; +} + +static struct notifier_block ioda_eeh_nb = { + .notifier_call = ioda_eeh_event, + .next = NULL, + .priority = 0 +}; + +#ifdef CONFIG_DEBUG_FS +static int ioda_eeh_dbgfs_set(void *data, u64 val) +{ + struct pci_controller *hose = data; + struct pnv_phb *phb = hose->private_data; + + out_be64(phb->regs + 0xD10, val); + return 0; +} + +static int ioda_eeh_dbgfs_get(void *data, u64 *val) +{ + struct pci_controller *hose = data; + struct pnv_phb *phb = hose->private_data; + + *val = in_be64(phb->regs + 0xD10); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_dbgfs_ops, ioda_eeh_dbgfs_get, + ioda_eeh_dbgfs_set, "0x%llx\n"); +#endif /* CONFIG_DEBUG_FS */ + +/** + * ioda_eeh_post_init - Chip dependent post initialization + * @hose: PCI controller + * + * The function will be called after eeh PEs and devices + * have been built. That means the EEH is ready to supply + * service with I/O cache. + */ +static int ioda_eeh_post_init(struct pci_controller *hose) +{ + struct pnv_phb *phb = hose->private_data; + int ret; + + /* Register OPAL event notifier */ + if (!ioda_eeh_nb_init) { + ret = opal_notifier_register(&ioda_eeh_nb); + if (ret) { + pr_err("%s: Can't register OPAL event notifier (%d)\n", + __func__, ret); + return ret; + } + + ioda_eeh_nb_init = 1; + } + + /* FIXME: Enable it for PHB3 later */ + if (phb->type == PNV_PHB_IODA1) { + if (!hub_diag) { + hub_diag = (char *)__get_free_page(GFP_KERNEL | + __GFP_ZERO); + if (!hub_diag) { + pr_err("%s: Out of memory !\n", + __func__); + return -ENOMEM; + } + } + +#ifdef CONFIG_DEBUG_FS + if (phb->dbgfs) + debugfs_create_file("err_injct", 0600, + phb->dbgfs, hose, + &ioda_eeh_dbgfs_ops); +#endif + + phb->eeh_state |= PNV_EEH_STATE_ENABLED; + } + + return 0; +} + +/** + * ioda_eeh_set_option - Set EEH operation or I/O setting + * @pe: EEH PE + * @option: options + * + * Enable or disable EEH option for the indicated PE. The + * function also can be used to enable I/O or DMA for the + * PE. 
+ */ +static int ioda_eeh_set_option(struct eeh_pe *pe, int option) +{ + s64 ret; + u32 pe_no; + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + + /* Check on PE number */ + if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) { + pr_err("%s: PE address %x out of range [0, %x] " + "on PHB#%x\n", + __func__, pe->addr, phb->ioda.total_pe, + hose->global_number); + return -EINVAL; + } + + pe_no = pe->addr; + switch (option) { + case EEH_OPT_DISABLE: + ret = -EEXIST; + break; + case EEH_OPT_ENABLE: + ret = 0; + break; + case EEH_OPT_THAW_MMIO: + ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, + OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO); + if (ret) { + pr_warning("%s: Failed to enable MMIO for " + "PHB#%x-PE#%x, err=%lld\n", + __func__, hose->global_number, pe_no, ret); + return -EIO; + } + + break; + case EEH_OPT_THAW_DMA: + ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, + OPAL_EEH_ACTION_CLEAR_FREEZE_DMA); + if (ret) { + pr_warning("%s: Failed to enable DMA for " + "PHB#%x-PE#%x, err=%lld\n", + __func__, hose->global_number, pe_no, ret); + return -EIO; + } + + break; + default: + pr_warning("%s: Invalid option %d\n", __func__, option); + return -EINVAL; + } + + return ret; +} + +/** + * ioda_eeh_get_state - Retrieve the state of PE + * @pe: EEH PE + * + * The PE's state should be retrieved from the PEEV, PEST + * IODA tables. Since the OPAL has exported the function + * to do it, it'd better to use that. + */ +static int ioda_eeh_get_state(struct eeh_pe *pe) +{ + s64 ret = 0; + u8 fstate; + u16 pcierr; + u32 pe_no; + int result; + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + + /* + * Sanity check on PE address. The PHB PE address should + * be zero. + */ + if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) { + pr_err("%s: PE address %x out of range [0, %x] " + "on PHB#%x\n", + __func__, pe->addr, phb->ioda.total_pe, + hose->global_number); + return EEH_STATE_NOT_SUPPORT; + } + + /* Retrieve PE status through OPAL */ + pe_no = pe->addr; + ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, + &fstate, &pcierr, NULL); + if (ret) { + pr_err("%s: Failed to get EEH status on " + "PHB#%x-PE#%x\n, err=%lld\n", + __func__, hose->global_number, pe_no, ret); + return EEH_STATE_NOT_SUPPORT; + } + + /* Check PHB status */ + if (pe->type & EEH_PE_PHB) { + result = 0; + result &= ~EEH_STATE_RESET_ACTIVE; + + if (pcierr != OPAL_EEH_PHB_ERROR) { + result |= EEH_STATE_MMIO_ACTIVE; + result |= EEH_STATE_DMA_ACTIVE; + result |= EEH_STATE_MMIO_ENABLED; + result |= EEH_STATE_DMA_ENABLED; + } + + return result; + } + + /* Parse result out */ + result = 0; + switch (fstate) { + case OPAL_EEH_STOPPED_NOT_FROZEN: + result &= ~EEH_STATE_RESET_ACTIVE; + result |= EEH_STATE_MMIO_ACTIVE; + result |= EEH_STATE_DMA_ACTIVE; + result |= EEH_STATE_MMIO_ENABLED; + result |= EEH_STATE_DMA_ENABLED; + break; + case OPAL_EEH_STOPPED_MMIO_FREEZE: + result &= ~EEH_STATE_RESET_ACTIVE; + result |= EEH_STATE_DMA_ACTIVE; + result |= EEH_STATE_DMA_ENABLED; + break; + case OPAL_EEH_STOPPED_DMA_FREEZE: + result &= ~EEH_STATE_RESET_ACTIVE; + result |= EEH_STATE_MMIO_ACTIVE; + result |= EEH_STATE_MMIO_ENABLED; + break; + case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE: + result &= ~EEH_STATE_RESET_ACTIVE; + break; + case OPAL_EEH_STOPPED_RESET: + result |= EEH_STATE_RESET_ACTIVE; + break; + case OPAL_EEH_STOPPED_TEMP_UNAVAIL: + result |= EEH_STATE_UNAVAILABLE; + break; + case OPAL_EEH_STOPPED_PERM_UNAVAIL: + result |= EEH_STATE_NOT_SUPPORT; + break; + default: + 
pr_warning("%s: Unexpected EEH status 0x%x " + "on PHB#%x-PE#%x\n", + __func__, fstate, hose->global_number, pe_no); + } + + return result; +} + +static int ioda_eeh_pe_clear(struct eeh_pe *pe) +{ + struct pci_controller *hose; + struct pnv_phb *phb; + u32 pe_no; + u8 fstate; + u16 pcierr; + s64 ret; + + pe_no = pe->addr; + hose = pe->phb; + phb = pe->phb->private_data; + + /* Clear the EEH error on the PE */ + ret = opal_pci_eeh_freeze_clear(phb->opal_id, + pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); + if (ret) { + pr_err("%s: Failed to clear EEH error for " + "PHB#%x-PE#%x, err=%lld\n", + __func__, hose->global_number, pe_no, ret); + return -EIO; + } + + /* + * Read the PE state back and verify that the frozen + * state has been removed. + */ + ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, + &fstate, &pcierr, NULL); + if (ret) { + pr_err("%s: Failed to get EEH status on " + "PHB#%x-PE#%x\n, err=%lld\n", + __func__, hose->global_number, pe_no, ret); + return -EIO; + } + + if (fstate != OPAL_EEH_STOPPED_NOT_FROZEN) { + pr_err("%s: Frozen state not cleared on " + "PHB#%x-PE#%x, sts=%x\n", + __func__, hose->global_number, pe_no, fstate); + return -EIO; + } + + return 0; +} + +static s64 ioda_eeh_phb_poll(struct pnv_phb *phb) +{ + s64 rc = OPAL_HARDWARE; + + while (1) { + rc = opal_pci_poll(phb->opal_id); + if (rc <= 0) + break; + + msleep(rc); + } + + return rc; +} + +static int ioda_eeh_phb_reset(struct pci_controller *hose, int option) +{ + struct pnv_phb *phb = hose->private_data; + s64 rc = OPAL_HARDWARE; + + pr_debug("%s: Reset PHB#%x, option=%d\n", + __func__, hose->global_number, option); + + /* Issue PHB complete reset request */ + if (option == EEH_RESET_FUNDAMENTAL || + option == EEH_RESET_HOT) + rc = opal_pci_reset(phb->opal_id, + OPAL_PHB_COMPLETE, + OPAL_ASSERT_RESET); + else if (option == EEH_RESET_DEACTIVATE) + rc = opal_pci_reset(phb->opal_id, + OPAL_PHB_COMPLETE, + OPAL_DEASSERT_RESET); + if (rc < 0) + goto out; + + /* + * Poll state of the PHB until the request is done + * successfully. + */ + rc = ioda_eeh_phb_poll(phb); +out: + if (rc != OPAL_SUCCESS) + return -EIO; + + return 0; +} + +static int ioda_eeh_root_reset(struct pci_controller *hose, int option) +{ + struct pnv_phb *phb = hose->private_data; + s64 rc = OPAL_SUCCESS; + + pr_debug("%s: Reset PHB#%x, option=%d\n", + __func__, hose->global_number, option); + + /* + * During the reset deassert time, we needn't care + * the reset scope because the firmware does nothing + * for fundamental or hot reset during deassert phase. 
+ */ + if (option == EEH_RESET_FUNDAMENTAL) + rc = opal_pci_reset(phb->opal_id, + OPAL_PCI_FUNDAMENTAL_RESET, + OPAL_ASSERT_RESET); + else if (option == EEH_RESET_HOT) + rc = opal_pci_reset(phb->opal_id, + OPAL_PCI_HOT_RESET, + OPAL_ASSERT_RESET); + else if (option == EEH_RESET_DEACTIVATE) + rc = opal_pci_reset(phb->opal_id, + OPAL_PCI_HOT_RESET, + OPAL_DEASSERT_RESET); + if (rc < 0) + goto out; + + /* Poll state of the PHB until the request is done */ + rc = ioda_eeh_phb_poll(phb); +out: + if (rc != OPAL_SUCCESS) + return -EIO; + + return 0; +} + +static int ioda_eeh_bridge_reset(struct pci_controller *hose, + struct pci_dev *dev, int option) +{ + u16 ctrl; + + pr_debug("%s: Reset device %04x:%02x:%02x.%01x with option %d\n", + __func__, hose->global_number, dev->bus->number, + PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), option); + + switch (option) { + case EEH_RESET_FUNDAMENTAL: + case EEH_RESET_HOT: + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + break; + case EEH_RESET_DEACTIVATE: + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + break; + } + + return 0; +} + +/** + * ioda_eeh_reset - Reset the indicated PE + * @pe: EEH PE + * @option: reset option + * + * Do reset on the indicated PE. For PCI bus sensitive PE, + * we need to reset the parent p2p bridge. The PHB has to + * be reinitialized if the p2p bridge is root bridge. For + * PCI device sensitive PE, we will try to reset the device + * through FLR. For now, we don't have OPAL APIs to do HARD + * reset yet, so all reset would be SOFT (HOT) reset. + */ +static int ioda_eeh_reset(struct eeh_pe *pe, int option) +{ + struct pci_controller *hose = pe->phb; + struct eeh_dev *edev; + struct pci_dev *dev; + int ret; + + /* + * Anyway, we have to clear the problematic state for the + * corresponding PE. However, we needn't do it if the PE + * is PHB associated. That means the PHB is having fatal + * errors and it needs reset. Further more, the AIB interface + * isn't reliable any more. + */ + if (!(pe->type & EEH_PE_PHB) && + (option == EEH_RESET_HOT || + option == EEH_RESET_FUNDAMENTAL)) { + ret = ioda_eeh_pe_clear(pe); + if (ret) + return -EIO; + } + + /* + * The rules applied to reset, either fundamental or hot reset: + * + * We always reset the direct upstream bridge of the PE. If the + * direct upstream bridge isn't root bridge, we always take hot + * reset no matter what option (fundamental or hot) is. Otherwise, + * we should do the reset according to the required option. + */ + if (pe->type & EEH_PE_PHB) { + ret = ioda_eeh_phb_reset(hose, option); + } else { + if (pe->type & EEH_PE_DEVICE) { + /* + * If it's device PE, we didn't refer to the parent + * PCI bus yet. So we have to figure it out indirectly. + */ + edev = list_first_entry(&pe->edevs, + struct eeh_dev, list); + dev = eeh_dev_to_pci_dev(edev); + dev = dev->bus->self; + } else { + /* + * If it's bus PE, the parent PCI bus is already there + * and just pick it up. + */ + dev = pe->bus->self; + } + + /* + * Do reset based on the fact that the direct upstream bridge + * is root bridge (port) or not. 
+ */ + if (dev->bus->number == 0) + ret = ioda_eeh_root_reset(hose, option); + else + ret = ioda_eeh_bridge_reset(hose, dev, option); + } + + return ret; +} + +/** + * ioda_eeh_get_log - Retrieve error log + * @pe: EEH PE + * @severity: Severity level of the log + * @drv_log: buffer to store the log + * @len: space of the log buffer + * + * The function is used to retrieve error log from P7IOC. + */ +static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, + char *drv_log, unsigned long len) +{ + s64 ret; + unsigned long flags; + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + + spin_lock_irqsave(&phb->lock, flags); + + ret = opal_pci_get_phb_diag_data2(phb->opal_id, + phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); + if (ret) { + spin_unlock_irqrestore(&phb->lock, flags); + pr_warning("%s: Failed to get log for PHB#%x-PE#%x\n", + __func__, hose->global_number, pe->addr); + return -EIO; + } + + /* + * FIXME: We probably need log the error in somewhere. + * Lets make it up in future. + */ + /* pr_info("%s", phb->diag.blob); */ + + spin_unlock_irqrestore(&phb->lock, flags); + + return 0; +} + +/** + * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE + * @pe: EEH PE + * + * For particular PE, it might have included PCI bridges. In order + * to make the PE work properly, those PCI bridges should be configured + * correctly. However, we need do nothing on P7IOC since the reset + * function will do everything that should be covered by the function. + */ +static int ioda_eeh_configure_bridge(struct eeh_pe *pe) +{ + return 0; +} + +static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) +{ + /* GEM */ + pr_info(" GEM XFIR: %016llx\n", data->gemXfir); + pr_info(" GEM RFIR: %016llx\n", data->gemRfir); + pr_info(" GEM RIRQFIR: %016llx\n", data->gemRirqfir); + pr_info(" GEM Mask: %016llx\n", data->gemMask); + pr_info(" GEM RWOF: %016llx\n", data->gemRwof); + + /* LEM */ + pr_info(" LEM FIR: %016llx\n", data->lemFir); + pr_info(" LEM Error Mask: %016llx\n", data->lemErrMask); + pr_info(" LEM Action 0: %016llx\n", data->lemAction0); + pr_info(" LEM Action 1: %016llx\n", data->lemAction1); + pr_info(" LEM WOF: %016llx\n", data->lemWof); +} + +static void ioda_eeh_hub_diag(struct pci_controller *hose) +{ + struct pnv_phb *phb = hose->private_data; + struct OpalIoP7IOCErrorData *data; + long rc; + + data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; + rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); + if (rc != OPAL_SUCCESS) { + pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", + __func__, phb->hub_id, rc); + return; + } + + switch (data->type) { + case OPAL_P7IOC_DIAG_TYPE_RGC: + pr_info("P7IOC diag-data for RGC\n\n"); + ioda_eeh_hub_diag_common(data); + pr_info(" RGC Status: %016llx\n", data->rgc.rgcStatus); + pr_info(" RGC LDCP: %016llx\n", data->rgc.rgcLdcp); + break; + case OPAL_P7IOC_DIAG_TYPE_BI: + pr_info("P7IOC diag-data for BI %s\n\n", + data->bi.biDownbound ? 
"Downbound" : "Upbound"); + ioda_eeh_hub_diag_common(data); + pr_info(" BI LDCP 0: %016llx\n", data->bi.biLdcp0); + pr_info(" BI LDCP 1: %016llx\n", data->bi.biLdcp1); + pr_info(" BI LDCP 2: %016llx\n", data->bi.biLdcp2); + pr_info(" BI Fence Status: %016llx\n", data->bi.biFenceStatus); + break; + case OPAL_P7IOC_DIAG_TYPE_CI: + pr_info("P7IOC diag-data for CI Port %d\\nn", + data->ci.ciPort); + ioda_eeh_hub_diag_common(data); + pr_info(" CI Port Status: %016llx\n", data->ci.ciPortStatus); + pr_info(" CI Port LDCP: %016llx\n", data->ci.ciPortLdcp); + break; + case OPAL_P7IOC_DIAG_TYPE_MISC: + pr_info("P7IOC diag-data for MISC\n\n"); + ioda_eeh_hub_diag_common(data); + break; + case OPAL_P7IOC_DIAG_TYPE_I2C: + pr_info("P7IOC diag-data for I2C\n\n"); + ioda_eeh_hub_diag_common(data); + break; + default: + pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n", + __func__, phb->hub_id, data->type); + } +} + +static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose, + struct OpalIoPhbErrorCommon *common) +{ + struct OpalIoP7IOCPhbErrorData *data; + int i; + + data = (struct OpalIoP7IOCPhbErrorData *)common; + + pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n", + hose->global_number, common->version); + + pr_info(" brdgCtl: %08x\n", data->brdgCtl); + + pr_info(" portStatusReg: %08x\n", data->portStatusReg); + pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); + pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); + + pr_info(" deviceStatus: %08x\n", data->deviceStatus); + pr_info(" slotStatus: %08x\n", data->slotStatus); + pr_info(" linkStatus: %08x\n", data->linkStatus); + pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); + pr_info(" devSecStatus: %08x\n", data->devSecStatus); + + pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); + pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); + pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); + pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); + pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); + pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); + pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); + pr_info(" sourceId: %08x\n", data->sourceId); + + pr_info(" errorClass: %016llx\n", data->errorClass); + pr_info(" correlator: %016llx\n", data->correlator); + pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); + pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); + pr_info(" lemFir: %016llx\n", data->lemFir); + pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); + pr_info(" lemWOF: %016llx\n", data->lemWOF); + pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); + pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); + pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); + pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); + pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); + pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); + pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); + pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); + pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); + pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); + pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); + pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); + pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); + pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); + pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); + pr_info(" dma1ErrorLog1: %016llx\n", 
data->dma1ErrorLog1); + + for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { + if ((data->pestA[i] >> 63) == 0 && + (data->pestB[i] >> 63) == 0) + continue; + + pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); + pr_info(" PESTB: %016llx\n", data->pestB[i]); + } +} + +static void ioda_eeh_phb_diag(struct pci_controller *hose) +{ + struct pnv_phb *phb = hose->private_data; + struct OpalIoPhbErrorCommon *common; + long rc; + + common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; + rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); + if (rc != OPAL_SUCCESS) { + pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", + __func__, hose->global_number, rc); + return; + } + + switch (common->ioType) { + case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: + ioda_eeh_p7ioc_phb_diag(hose, common); + break; + default: + pr_warning("%s: Unrecognized I/O chip %d\n", + __func__, common->ioType); + } +} + +static int ioda_eeh_get_phb_pe(struct pci_controller *hose, + struct eeh_pe **pe) +{ + struct eeh_pe *phb_pe; + + phb_pe = eeh_phb_pe_get(hose); + if (!phb_pe) { + pr_warning("%s Can't find PE for PHB#%d\n", + __func__, hose->global_number); + return -EEXIST; + } + + *pe = phb_pe; + return 0; +} + +static int ioda_eeh_get_pe(struct pci_controller *hose, + u16 pe_no, struct eeh_pe **pe) +{ + struct eeh_pe *phb_pe, *dev_pe; + struct eeh_dev dev; + + /* Find the PHB PE */ + if (ioda_eeh_get_phb_pe(hose, &phb_pe)) + return -EEXIST; + + /* Find the PE according to PE# */ + memset(&dev, 0, sizeof(struct eeh_dev)); + dev.phb = hose; + dev.pe_config_addr = pe_no; + dev_pe = eeh_pe_get(&dev); + if (!dev_pe) { + pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n", + __func__, hose->global_number, pe_no); + return -EEXIST; + } + + *pe = dev_pe; + return 0; +} + +/** + * ioda_eeh_next_error - Retrieve next error for EEH core to handle + * @pe: The affected PE + * + * The function is expected to be called by EEH core while it gets + * special EEH event (without binding PE). The function calls to + * OPAL APIs for next error to handle. The informational error is + * handled internally by platform. However, the dead IOC, dead PHB, + * fenced PHB and frozen PE should be handled by EEH core eventually. + */ +static int ioda_eeh_next_error(struct eeh_pe **pe) +{ + struct pci_controller *hose, *tmp; + struct pnv_phb *phb; + u64 frozen_pe_no; + u16 err_type, severity; + long rc; + int ret = 1; + + /* + * While running here, it's safe to purge the event queue. + * And we should keep the cached OPAL notifier event sychronized + * between the kernel and firmware. + */ + eeh_remove_event(NULL); + opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + /* + * If the subordinate PCI buses of the PHB has been + * removed, we needn't take care of it any more. + */ + phb = hose->private_data; + if (phb->eeh_state & PNV_EEH_STATE_REMOVED) + continue; + + rc = opal_pci_next_error(phb->opal_id, + &frozen_pe_no, &err_type, &severity); + + /* If OPAL API returns error, we needn't proceed */ + if (rc != OPAL_SUCCESS) { + IODA_EEH_DBG("%s: Invalid return value on " + "PHB#%x (0x%lx) from opal_pci_next_error", + __func__, hose->global_number, rc); + continue; + } + + /* If the PHB doesn't have error, stop processing */ + if (err_type == OPAL_EEH_NO_ERROR || + severity == OPAL_EEH_SEV_NO_ERROR) { + IODA_EEH_DBG("%s: No error found on PHB#%x\n", + __func__, hose->global_number); + continue; + } + + /* + * Processing the error. 
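Editor's note on ioda_eeh_next_error(): as the switch further down shows, the return value is a small code describing what was found (4: dead IOC, 3: dead PHB, 2: fenced PHB, 1: frozen PE, 0: nothing left to handle). An illustrative caller is sketched below; it is not the real EEH event handler, just a summary of how the codes can be dispatched:

/* Editor's sketch of dispatching on the next_error return code. */
static void example_handle_next_error(void)
{
	struct eeh_pe *pe = NULL;
	int rc;

	rc = ioda_eeh_ops.next_error(&pe);
	if (rc >= 2)
		pr_err("EEH: IOC/PHB level failure (code %d), full recovery needed\n", rc);
	else if (rc == 1)
		pr_err("EEH: PE#%x frozen, recovering single PE\n", pe->addr);
	/* rc == 0: nothing pending */
}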
We're expecting the error with + * highest priority reported upon multiple errors on the + * specific PHB. + */ + IODA_EEH_DBG("%s: Error (%d, %d, %d) on PHB#%x\n", + err_type, severity, pe_no, hose->global_number); + switch (err_type) { + case OPAL_EEH_IOC_ERROR: + if (severity == OPAL_EEH_SEV_IOC_DEAD) { + list_for_each_entry_safe(hose, tmp, + &hose_list, list_node) { + phb = hose->private_data; + phb->eeh_state |= PNV_EEH_STATE_REMOVED; + } + + pr_err("EEH: dead IOC detected\n"); + ret = 4; + goto out; + } else if (severity == OPAL_EEH_SEV_INF) { + pr_info("EEH: IOC informative error " + "detected\n"); + ioda_eeh_hub_diag(hose); + } + + break; + case OPAL_EEH_PHB_ERROR: + if (severity == OPAL_EEH_SEV_PHB_DEAD) { + if (ioda_eeh_get_phb_pe(hose, pe)) + break; + + pr_err("EEH: dead PHB#%x detected\n", + hose->global_number); + phb->eeh_state |= PNV_EEH_STATE_REMOVED; + ret = 3; + goto out; + } else if (severity == OPAL_EEH_SEV_PHB_FENCED) { + if (ioda_eeh_get_phb_pe(hose, pe)) + break; + + pr_err("EEH: fenced PHB#%x detected\n", + hose->global_number); + ret = 2; + goto out; + } else if (severity == OPAL_EEH_SEV_INF) { + pr_info("EEH: PHB#%x informative error " + "detected\n", + hose->global_number); + ioda_eeh_phb_diag(hose); + } + + break; + case OPAL_EEH_PE_ERROR: + if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) + break; + + pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", + (*pe)->addr, (*pe)->phb->global_number); + ret = 1; + goto out; + } + } + + ret = 0; +out: + return ret; +} + +struct pnv_eeh_ops ioda_eeh_ops = { + .post_init = ioda_eeh_post_init, + .set_option = ioda_eeh_set_option, + .get_state = ioda_eeh_get_state, + .reset = ioda_eeh_reset, + .get_log = ioda_eeh_get_log, + .configure_bridge = ioda_eeh_configure_bridge, + .next_error = ioda_eeh_next_error +}; diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c new file mode 100644 index 000000000000..969cce73055a --- /dev/null +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -0,0 +1,379 @@ +/* + * The file intends to implement the platform dependent EEH operations on + * powernv platform. Actually, the powernv was created in order to fully + * hypervisor support. + * + * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/proc_fs.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> + +#include <asm/eeh.h> +#include <asm/eeh_event.h> +#include <asm/firmware.h> +#include <asm/io.h> +#include <asm/iommu.h> +#include <asm/machdep.h> +#include <asm/msi_bitmap.h> +#include <asm/opal.h> +#include <asm/ppc-pci.h> + +#include "powernv.h" +#include "pci.h" + +/** + * powernv_eeh_init - EEH platform dependent initialization + * + * EEH platform dependent initialization on powernv + */ +static int powernv_eeh_init(void) +{ + /* We require OPALv3 */ + if (!firmware_has_feature(FW_FEATURE_OPALv3)) { + pr_warning("%s: OPALv3 is required !\n", __func__); + return -EINVAL; + } + + /* Set EEH probe mode */ + eeh_probe_mode_set(EEH_PROBE_MODE_DEV); + + return 0; +} + +/** + * powernv_eeh_post_init - EEH platform dependent post initialization + * + * EEH platform dependent post initialization on powernv. When + * the function is called, the EEH PEs and devices should have + * been built. If the I/O cache staff has been built, EEH is + * ready to supply service. + */ +static int powernv_eeh_post_init(void) +{ + struct pci_controller *hose; + struct pnv_phb *phb; + int ret = 0; + + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; + + if (phb->eeh_ops && phb->eeh_ops->post_init) { + ret = phb->eeh_ops->post_init(hose); + if (ret) + break; + } + } + + return ret; +} + +/** + * powernv_eeh_dev_probe - Do probe on PCI device + * @dev: PCI device + * @flag: unused + * + * When EEH module is installed during system boot, all PCI devices + * are checked one by one to see if it supports EEH. The function + * is introduced for the purpose. By default, EEH has been enabled + * on all PCI devices. That's to say, we only need do necessary + * initialization on the corresponding eeh device and create PE + * accordingly. + * + * It's notable that's unsafe to retrieve the EEH device through + * the corresponding PCI device. During the PCI device hotplug, which + * was possiblly triggered by EEH core, the binding between EEH device + * and the PCI device isn't built yet. + */ +static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + struct pnv_phb *phb = hose->private_data; + struct device_node *dn = pci_device_to_OF_node(dev); + struct eeh_dev *edev = of_node_to_eeh_dev(dn); + + /* + * When probing the root bridge, which doesn't have any + * subordinate PCI devices. We don't have OF node for + * the root bridge. So it's not reasonable to continue + * the probing. 
+ */ + if (!dn || !edev) + return 0; + + /* Skip for PCI-ISA bridge */ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA) + return 0; + + /* Initialize eeh device */ + edev->class_code = dev->class; + edev->mode = 0; + edev->config_addr = ((dev->bus->number << 8) | dev->devfn); + edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff); + + /* Create PE */ + eeh_add_to_parent_pe(edev); + + /* + * Enable EEH explicitly so that we will do EEH check + * while accessing I/O stuff + * + * FIXME: Enable that for PHB3 later + */ + if (phb->type == PNV_PHB_IODA1) + eeh_subsystem_enabled = 1; + + /* Save memory bars */ + eeh_save_bars(edev); + + return 0; +} + +/** + * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable + * @pe: EEH PE + * @option: operation to be issued + * + * The function is used to control the EEH functionality globally. + * Currently, following options are support according to PAPR: + * Enable EEH, Disable EEH, Enable MMIO and Enable DMA + */ +static int powernv_eeh_set_option(struct eeh_pe *pe, int option) +{ + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + int ret = -EEXIST; + + /* + * What we need do is pass it down for hardware + * implementation to handle it. + */ + if (phb->eeh_ops && phb->eeh_ops->set_option) + ret = phb->eeh_ops->set_option(pe, option); + + return ret; +} + +/** + * powernv_eeh_get_pe_addr - Retrieve PE address + * @pe: EEH PE + * + * Retrieve the PE address according to the given tranditional + * PCI BDF (Bus/Device/Function) address. + */ +static int powernv_eeh_get_pe_addr(struct eeh_pe *pe) +{ + return pe->addr; +} + +/** + * powernv_eeh_get_state - Retrieve PE state + * @pe: EEH PE + * @delay: delay while PE state is temporarily unavailable + * + * Retrieve the state of the specified PE. For IODA-compitable + * platform, it should be retrieved from IODA table. Therefore, + * we prefer passing down to hardware implementation to handle + * it. + */ +static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay) +{ + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + int ret = EEH_STATE_NOT_SUPPORT; + + if (phb->eeh_ops && phb->eeh_ops->get_state) { + ret = phb->eeh_ops->get_state(pe); + + /* + * If the PE state is temporarily unavailable, + * to inform the EEH core delay for default + * period (1 second) + */ + if (delay) { + *delay = 0; + if (ret & EEH_STATE_UNAVAILABLE) + *delay = 1000; + } + } + + return ret; +} + +/** + * powernv_eeh_reset - Reset the specified PE + * @pe: EEH PE + * @option: reset option + * + * Reset the specified PE + */ +static int powernv_eeh_reset(struct eeh_pe *pe, int option) +{ + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + int ret = -EEXIST; + + if (phb->eeh_ops && phb->eeh_ops->reset) + ret = phb->eeh_ops->reset(pe, option); + + return ret; +} + +/** + * powernv_eeh_wait_state - Wait for PE state + * @pe: EEH PE + * @max_wait: maximal period in microsecond + * + * Wait for the state of associated PE. It might take some time + * to retrieve the PE's state. + */ +static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait) +{ + int ret; + int mwait; + + while (1) { + ret = powernv_eeh_get_state(pe, &mwait); + + /* + * If the PE's state is temporarily unavailable, + * we have to wait for the specified time. Otherwise, + * the PE's state will be returned immediately. 
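Editor's note on the address packing in powernv_eeh_dev_probe() above: edev->config_addr uses the usual PCI (bus << 8) | devfn encoding, so bus 0x01, device 0x02, function 3 gives devfn = (2 << 3) | 3 = 0x13 and config_addr = 0x0113. The helper below is hypothetical, added only to spell the packing out:

/* Hypothetical helper showing the config_addr packing used above. */
static u32 example_config_addr(u8 bus, u8 devfn)
{
	return ((u32)bus << 8) | devfn;
}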
+ */ + if (ret != EEH_STATE_UNAVAILABLE) + return ret; + + max_wait -= mwait; + if (max_wait <= 0) { + pr_warning("%s: Timeout getting PE#%x's state (%d)\n", + __func__, pe->addr, max_wait); + return EEH_STATE_NOT_SUPPORT; + } + + msleep(mwait); + } + + return EEH_STATE_NOT_SUPPORT; +} + +/** + * powernv_eeh_get_log - Retrieve error log + * @pe: EEH PE + * @severity: temporary or permanent error log + * @drv_log: driver log to be combined with retrieved error log + * @len: length of driver log + * + * Retrieve the temporary or permanent error from the PE. + */ +static int powernv_eeh_get_log(struct eeh_pe *pe, int severity, + char *drv_log, unsigned long len) +{ + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + int ret = -EEXIST; + + if (phb->eeh_ops && phb->eeh_ops->get_log) + ret = phb->eeh_ops->get_log(pe, severity, drv_log, len); + + return ret; +} + +/** + * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE + * @pe: EEH PE + * + * The function will be called to reconfigure the bridges included + * in the specified PE so that the mulfunctional PE would be recovered + * again. + */ +static int powernv_eeh_configure_bridge(struct eeh_pe *pe) +{ + struct pci_controller *hose = pe->phb; + struct pnv_phb *phb = hose->private_data; + int ret = 0; + + if (phb->eeh_ops && phb->eeh_ops->configure_bridge) + ret = phb->eeh_ops->configure_bridge(pe); + + return ret; +} + +/** + * powernv_eeh_next_error - Retrieve next EEH error to handle + * @pe: Affected PE + * + * Using OPAL API, to retrieve next EEH error for EEH core to handle + */ +static int powernv_eeh_next_error(struct eeh_pe **pe) +{ + struct pci_controller *hose; + struct pnv_phb *phb = NULL; + + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; + break; + } + + if (phb && phb->eeh_ops->next_error) + return phb->eeh_ops->next_error(pe); + + return -EEXIST; +} + +static struct eeh_ops powernv_eeh_ops = { + .name = "powernv", + .init = powernv_eeh_init, + .post_init = powernv_eeh_post_init, + .of_probe = NULL, + .dev_probe = powernv_eeh_dev_probe, + .set_option = powernv_eeh_set_option, + .get_pe_addr = powernv_eeh_get_pe_addr, + .get_state = powernv_eeh_get_state, + .reset = powernv_eeh_reset, + .wait_state = powernv_eeh_wait_state, + .get_log = powernv_eeh_get_log, + .configure_bridge = powernv_eeh_configure_bridge, + .read_config = pnv_pci_cfg_read, + .write_config = pnv_pci_cfg_write, + .next_error = powernv_eeh_next_error +}; + +/** + * eeh_powernv_init - Register platform dependent EEH operations + * + * EEH initialization on powernv platform. This function should be + * called before any EEH related functions. 
+ */ +static int __init eeh_powernv_init(void) +{ + int ret = -EINVAL; + + if (!machine_is(powernv)) + return ret; + + ret = eeh_ops_register(&powernv_eeh_ops); + if (!ret) + pr_info("EEH: PowerNV platform initialized\n"); + else + pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret); + + return ret; +} + +early_initcall(eeh_powernv_init); diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 6fabe92eafb6..e88863ffb135 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -107,4 +107,7 @@ OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR); OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS); OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS); OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED); +OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR); +OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL); OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI); +OPAL_CALL(opal_pci_get_phb_diag_data2, OPAL_PCI_GET_PHB_DIAG_DATA2); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 628c564ceadb..106301fd2fa5 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -15,6 +15,7 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/interrupt.h> +#include <linux/notifier.h> #include <linux/slab.h> #include <asm/opal.h> #include <asm/firmware.h> @@ -31,6 +32,10 @@ static DEFINE_SPINLOCK(opal_write_lock); extern u64 opal_mc_secondary_handler[]; static unsigned int *opal_irqs; static unsigned int opal_irq_count; +static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); +static DEFINE_SPINLOCK(opal_notifier_lock); +static uint64_t last_notified_mask = 0x0ul; +static atomic_t opal_notifier_hold = ATOMIC_INIT(0); int __init early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data) @@ -95,6 +100,68 @@ static int __init opal_register_exception_handlers(void) early_initcall(opal_register_exception_handlers); +int opal_notifier_register(struct notifier_block *nb) +{ + if (!nb) { + pr_warning("%s: Invalid argument (%p)\n", + __func__, nb); + return -EINVAL; + } + + atomic_notifier_chain_register(&opal_notifier_head, nb); + return 0; +} + +static void opal_do_notifier(uint64_t events) +{ + unsigned long flags; + uint64_t changed_mask; + + if (atomic_read(&opal_notifier_hold)) + return; + + spin_lock_irqsave(&opal_notifier_lock, flags); + changed_mask = last_notified_mask ^ events; + last_notified_mask = events; + spin_unlock_irqrestore(&opal_notifier_lock, flags); + + /* + * We feed with the event bits and changed bits for + * enough information to the callback. 
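Editor's note on the OPAL notifier introduced in opal.c: as the comment above says and the atomic_notifier_call_chain() call just below shows, consumers receive the current event bits as the notifier value and the bits that changed since the last notification through the data pointer. A sketch of a consumer, using only symbols visible in this patch (the handler name and message are made up for illustration):

#include <linux/notifier.h>

/* Illustrative OPAL event consumer; not part of the patch. */
static int example_opal_event(struct notifier_block *nb,
			      unsigned long events, void *data)
{
	uint64_t changed = (uint64_t)(unsigned long)data;

	/* React only when the PCI error bit has just been raised. */
	if ((events & OPAL_EVENT_PCI_ERROR) && (changed & OPAL_EVENT_PCI_ERROR))
		pr_info("example: OPAL signalled a PCI error event\n");

	return 0;
}

static struct notifier_block example_opal_nb = {
	.notifier_call = example_opal_event,
};

/* Registration, e.g. from an initcall: opal_notifier_register(&example_opal_nb); */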
+ */ + atomic_notifier_call_chain(&opal_notifier_head, + events, (void *)changed_mask); +} + +void opal_notifier_update_evt(uint64_t evt_mask, + uint64_t evt_val) +{ + unsigned long flags; + + spin_lock_irqsave(&opal_notifier_lock, flags); + last_notified_mask &= ~evt_mask; + last_notified_mask |= evt_val; + spin_unlock_irqrestore(&opal_notifier_lock, flags); +} + +void opal_notifier_enable(void) +{ + int64_t rc; + uint64_t evt = 0; + + atomic_set(&opal_notifier_hold, 0); + + /* Process pending events */ + rc = opal_poll_events(&evt); + if (rc == OPAL_SUCCESS && evt) + opal_do_notifier(evt); +} + +void opal_notifier_disable(void) +{ + atomic_set(&opal_notifier_hold, 1); +} + int opal_get_chars(uint32_t vtermno, char *buf, int count) { s64 len, rc; @@ -297,7 +364,7 @@ static irqreturn_t opal_interrupt(int irq, void *data) opal_handle_interrupt(virq_to_hw(irq), &events); - /* XXX TODO: Do something with the events */ + opal_do_notifier(events); return IRQ_HANDLED; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 9c9d15e4cdf2..49b57b9f835d 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/pci.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> @@ -32,6 +33,7 @@ #include <asm/iommu.h> #include <asm/tce.h> #include <asm/xics.h> +#include <asm/debug.h> #include "powernv.h" #include "pci.h" @@ -441,6 +443,17 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev set_iommu_table_base(&pdev->dev, &pe->tce32_table); } +static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + set_iommu_table_base(&dev->dev, &pe->tce32_table); + if (dev->subordinate) + pnv_ioda_setup_bus_dma(pe, dev->subordinate); + } +} + static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, u64 *startp, u64 *endp) { @@ -595,6 +608,12 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, TCE_PCI_SWINV_PAIR; } iommu_init_table(tbl, phb->hose->node); + iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); + + if (pe->pdev) + set_iommu_table_base(&pe->pdev->dev, tbl); + else + pnv_ioda_setup_bus_dma(pe, pe->pbus); return; fail: @@ -667,6 +686,11 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, } iommu_init_table(tbl, phb->hose->node); + if (pe->pdev) + set_iommu_table_base(&pe->pdev->dev, tbl); + else + pnv_ioda_setup_bus_dma(pe, pe->pbus); + return; fail: if (pe->tce32_seg >= 0) @@ -968,11 +992,38 @@ static void pnv_pci_ioda_setup_DMA(void) } } +static void pnv_pci_ioda_create_dbgfs(void) +{ +#ifdef CONFIG_DEBUG_FS + struct pci_controller *hose, *tmp; + struct pnv_phb *phb; + char name[16]; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + phb = hose->private_data; + + sprintf(name, "PCI%04x", hose->global_number); + phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root); + if (!phb->dbgfs) + pr_warning("%s: Error on creating debugfs on PHB#%x\n", + __func__, hose->global_number); + } +#endif /* CONFIG_DEBUG_FS */ +} + static void pnv_pci_ioda_fixup(void) { pnv_pci_ioda_setup_PEs(); pnv_pci_ioda_setup_seg(); pnv_pci_ioda_setup_DMA(); + + pnv_pci_ioda_create_dbgfs(); + +#ifdef CONFIG_EEH + eeh_probe_mode_set(EEH_PROBE_MODE_DEV); + eeh_addr_cache_build(); + eeh_init(); +#endif } /* @@ -1049,7 +1100,8 @@ static void 
pnv_pci_ioda_shutdown(struct pnv_phb *phb) OPAL_ASSERT_RESET); } -void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) +void __init pnv_pci_init_ioda_phb(struct device_node *np, + u64 hub_id, int ioda_type) { struct pci_controller *hose; static int primary = 1; @@ -1087,6 +1139,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) hose->first_busno = 0; hose->last_busno = 0xff; hose->private_data = phb; + phb->hub_id = hub_id; phb->opal_id = phb_id; phb->type = ioda_type; @@ -1172,6 +1225,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) phb->ioda.io_size, phb->ioda.io_segsize); phb->hose->ops = &pnv_pci_ops; +#ifdef CONFIG_EEH + phb->eeh_ops = &ioda_eeh_ops; +#endif /* Setup RID -> PE mapping function */ phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe; @@ -1212,7 +1268,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) void pnv_pci_init_ioda2_phb(struct device_node *np) { - pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2); + pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); } void __init pnv_pci_init_ioda_hub(struct device_node *np) @@ -1235,6 +1291,6 @@ void __init pnv_pci_init_ioda_hub(struct device_node *np) for_each_child_of_node(np, phbn) { /* Look for IODA1 PHBs */ if (of_device_is_compatible(phbn, "ibm,ioda-phb")) - pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1); + pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1); } } diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 92b37a0186c9..b68db6325c1b 100644 --- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -86,13 +86,16 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { } static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) { - if (phb->p5ioc2.iommu_table.it_map == NULL) + if (phb->p5ioc2.iommu_table.it_map == NULL) { iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node); + iommu_register_group(&phb->p5ioc2.iommu_table, + pci_domain_nr(phb->hose->bus), phb->opal_id); + } set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table); } -static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, +static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, void *tce_mem, u64 tce_size) { struct pnv_phb *phb; @@ -133,6 +136,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, phb->hose->first_busno = 0; phb->hose->last_busno = 0xff; phb->hose->private_data = phb; + phb->hub_id = hub_id; phb->opal_id = phb_id; phb->type = PNV_PHB_P5IOC2; phb->model = PNV_PHB_MODEL_P5IOC2; @@ -226,7 +230,8 @@ void __init pnv_pci_init_p5ioc2_hub(struct device_node *np) for_each_child_of_node(np, phbn) { if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") || of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) { - pnv_pci_init_p5ioc2_phb(phbn, tce_mem, tce_per_phb); + pnv_pci_init_p5ioc2_phb(phbn, hub_id, + tce_mem, tce_per_phb); tce_mem += tce_per_phb; } } diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 277343cc6a3d..a28d3b5e6393 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -20,6 +20,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> +#include <linux/iommu.h> #include <asm/sections.h> #include <asm/io.h> @@ -32,6 +33,8 @@ #include <asm/iommu.h> #include <asm/tce.h> #include <asm/firmware.h> +#include <asm/eeh_event.h> +#include <asm/eeh.h> #include "powernv.h" #include 
"pci.h" @@ -202,7 +205,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) spin_lock_irqsave(&phb->lock, flags); - rc = opal_pci_get_phb_diag_data(phb->opal_id, phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); + rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, + PNV_PCI_DIAG_BUF_SIZE); has_diag = (rc == OPAL_SUCCESS); rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, @@ -227,43 +231,50 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) spin_unlock_irqrestore(&phb->lock, flags); } -static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus, - u32 bdfn) +static void pnv_pci_config_check_eeh(struct pnv_phb *phb, + struct device_node *dn) { s64 rc; u8 fstate; u16 pcierr; u32 pe_no; - /* Get PE# if we support IODA */ - pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0; + /* + * Get the PE#. During the PCI probe stage, we might not + * setup that yet. So all ER errors should be mapped to + * PE#0 + */ + pe_no = PCI_DN(dn)->pe_number; + if (pe_no == IODA_INVALID_PE) + pe_no = 0; /* Read freeze status */ rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr, NULL); if (rc) { - pr_warning("PCI %d: Failed to read EEH status for PE#%d," - " err %lld\n", phb->hose->global_number, pe_no, rc); + pr_warning("%s: Can't read EEH status (PE#%d) for " + "%s, err %lld\n", + __func__, pe_no, dn->full_name, rc); return; } - cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n", - bdfn, pe_no, fstate); + cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n", + (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn), + pe_no, fstate); if (fstate != 0) pnv_pci_handle_eeh_config(phb, pe_no); } -static int pnv_pci_read_config(struct pci_bus *bus, - unsigned int devfn, - int where, int size, u32 *val) +int pnv_pci_cfg_read(struct device_node *dn, + int where, int size, u32 *val) { - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; - u32 bdfn = (((uint64_t)bus->number) << 8) | devfn; + struct pci_dn *pdn = PCI_DN(dn); + struct pnv_phb *phb = pdn->phb->private_data; + u32 bdfn = (pdn->busno << 8) | pdn->devfn; +#ifdef CONFIG_EEH + struct eeh_pe *phb_pe = NULL; +#endif s64 rc; - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - switch (size) { case 1: { u8 v8; @@ -287,28 +298,43 @@ static int pnv_pci_read_config(struct pci_bus *bus, default: return PCIBIOS_FUNC_NOT_SUPPORTED; } - cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n", - bus->number, devfn, where, size, *val); - - /* Check if the PHB got frozen due to an error (no response) */ - pnv_pci_config_check_eeh(phb, bus, bdfn); + cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n", + __func__, pdn->busno, pdn->devfn, where, size, *val); + + /* + * Check if the specified PE has been put into frozen + * state. On the other hand, we needn't do that while + * the PHB has been put into frozen state because of + * PHB-fatal errors. 
+ */ +#ifdef CONFIG_EEH + phb_pe = eeh_phb_pe_get(pdn->phb); + if (phb_pe && (phb_pe->state & EEH_PE_ISOLATED)) + return PCIBIOS_SUCCESSFUL; + + if (phb->eeh_state & PNV_EEH_STATE_ENABLED) { + if (*val == EEH_IO_ERROR_VALUE(size) && + eeh_dev_check_failure(of_node_to_eeh_dev(dn))) + return PCIBIOS_DEVICE_NOT_FOUND; + } else { + pnv_pci_config_check_eeh(phb, dn); + } +#else + pnv_pci_config_check_eeh(phb, dn); +#endif return PCIBIOS_SUCCESSFUL; } -static int pnv_pci_write_config(struct pci_bus *bus, - unsigned int devfn, - int where, int size, u32 val) +int pnv_pci_cfg_write(struct device_node *dn, + int where, int size, u32 val) { - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; - u32 bdfn = (((uint64_t)bus->number) << 8) | devfn; - - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; + struct pci_dn *pdn = PCI_DN(dn); + struct pnv_phb *phb = pdn->phb->private_data; + u32 bdfn = (pdn->busno << 8) | pdn->devfn; - cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n", - bus->number, devfn, where, size, val); + cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n", + pdn->busno, pdn->devfn, where, size, val); switch (size) { case 1: opal_pci_config_write_byte(phb->opal_id, bdfn, where, val); @@ -322,14 +348,54 @@ static int pnv_pci_write_config(struct pci_bus *bus, default: return PCIBIOS_FUNC_NOT_SUPPORTED; } + /* Check if the PHB got frozen due to an error (no response) */ - pnv_pci_config_check_eeh(phb, bus, bdfn); +#ifdef CONFIG_EEH + if (!(phb->eeh_state & PNV_EEH_STATE_ENABLED)) + pnv_pci_config_check_eeh(phb, dn); +#else + pnv_pci_config_check_eeh(phb, dn); +#endif return PCIBIOS_SUCCESSFUL; } +static int pnv_pci_read_config(struct pci_bus *bus, + unsigned int devfn, + int where, int size, u32 *val) +{ + struct device_node *dn, *busdn = pci_bus_to_OF_node(bus); + struct pci_dn *pdn; + + for (dn = busdn->child; dn; dn = dn->sibling) { + pdn = PCI_DN(dn); + if (pdn && pdn->devfn == devfn) + return pnv_pci_cfg_read(dn, where, size, val); + } + + *val = 0xFFFFFFFF; + return PCIBIOS_DEVICE_NOT_FOUND; + +} + +static int pnv_pci_write_config(struct pci_bus *bus, + unsigned int devfn, + int where, int size, u32 val) +{ + struct device_node *dn, *busdn = pci_bus_to_OF_node(bus); + struct pci_dn *pdn; + + for (dn = busdn->child; dn; dn = dn->sibling) { + pdn = PCI_DN(dn); + if (pdn && pdn->devfn == devfn) + return pnv_pci_cfg_write(dn, where, size, val); + } + + return PCIBIOS_DEVICE_NOT_FOUND; +} + struct pci_ops pnv_pci_ops = { - .read = pnv_pci_read_config, + .read = pnv_pci_read_config, .write = pnv_pci_write_config, }; @@ -412,6 +478,7 @@ static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose) pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)), be32_to_cpup(sizep), 0); iommu_init_table(tbl, hose->node); + iommu_register_group(tbl, pci_domain_nr(hose->bus), 0); /* Deal with SW invalidated TCEs when needed (BML way) */ swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info", diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 25d76c4df50b..d633c64e05a1 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -66,15 +66,43 @@ struct pnv_ioda_pe { struct list_head list; }; +/* IOC dependent EEH operations */ +#ifdef CONFIG_EEH +struct pnv_eeh_ops { + int (*post_init)(struct pci_controller *hose); + int (*set_option)(struct eeh_pe *pe, int option); + int (*get_state)(struct eeh_pe *pe); + int (*reset)(struct eeh_pe 
*pe, int option); + int (*get_log)(struct eeh_pe *pe, int severity, + char *drv_log, unsigned long len); + int (*configure_bridge)(struct eeh_pe *pe); + int (*next_error)(struct eeh_pe **pe); +}; + +#define PNV_EEH_STATE_ENABLED (1 << 0) /* EEH enabled */ +#define PNV_EEH_STATE_REMOVED (1 << 1) /* PHB removed */ + +#endif /* CONFIG_EEH */ + struct pnv_phb { struct pci_controller *hose; enum pnv_phb_type type; enum pnv_phb_model model; + u64 hub_id; u64 opal_id; void __iomem *regs; int initialized; spinlock_t lock; +#ifdef CONFIG_EEH + struct pnv_eeh_ops *eeh_ops; + int eeh_state; +#endif + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs; +#endif + #ifdef CONFIG_PCI_MSI unsigned int msi_base; unsigned int msi32_support; @@ -150,7 +178,14 @@ struct pnv_phb { }; extern struct pci_ops pnv_pci_ops; +#ifdef CONFIG_EEH +extern struct pnv_eeh_ops ioda_eeh_ops; +#endif +int pnv_pci_cfg_read(struct device_node *dn, + int where, int size, u32 *val); +int pnv_pci_cfg_write(struct device_node *dn, + int where, int size, u32 val); extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index d4459bfc92f7..84438af96c05 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -93,6 +93,8 @@ static void __noreturn pnv_restart(char *cmd) { long rc = OPAL_BUSY; + opal_notifier_disable(); + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_cec_reboot(); if (rc == OPAL_BUSY_EVENT) @@ -108,6 +110,8 @@ static void __noreturn pnv_power_off(void) { long rc = OPAL_BUSY; + opal_notifier_disable(); + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_cec_power_down(0); if (rc == OPAL_BUSY_EVENT) diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 88c9459c3e07..89e3857af4e0 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -40,7 +40,7 @@ #define DBG(fmt...) #endif -static void __cpuinit pnv_smp_setup_cpu(int cpu) +static void pnv_smp_setup_cpu(int cpu) { if (cpu != boot_cpuid) xics_setup_cpu(); @@ -51,7 +51,7 @@ static int pnv_smp_cpu_bootable(unsigned int nr) /* Special case - we inhibit secondary thread startup * during boot if the user requests it. 
*/ - if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) { + if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) { if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) return 0; if (smt_enabled_at_boot diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index 177a2f70700c..3e270e3412ae 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -109,7 +109,8 @@ static long ps3_hpte_remove(unsigned long hpte_group) } static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long vpn, int psize, int ssize, int local) + unsigned long vpn, int psize, int apsize, + int ssize, int local) { int result; u64 hpte_v, want_v, hpte_rs; @@ -162,7 +163,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, } static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn, - int psize, int ssize, int local) + int psize, int apsize, int ssize, int local) { unsigned long flags; int result; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 4459eff7a75a..1bd3399146ed 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -33,11 +33,6 @@ config PPC_SPLPAR processors, that is, which share physical processors between two or more partitions. -config EEH - bool - depends on PPC_PSERIES && PCI - default y - config PSERIES_MSI bool depends on PCI_MSI && EEH diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 53866e537a92..8ae010381316 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -6,9 +6,7 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \ firmware.o power.o dlpar.o mobility.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCANLOG) += scanlog.o -obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ - eeh_driver.o eeh_event.o eeh_sysfs.o \ - eeh_pseries.o +obj-$(CONFIG_EEH) += eeh_pseries.o obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_PCI) += pci.o pci_dlpar.o obj-$(CONFIG_PSERIES_MSI) += msi.o diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c index ef9d9d84c7d5..5ea88d1541f7 100644 --- a/arch/powerpc/platforms/pseries/io_event_irq.c +++ b/arch/powerpc/platforms/pseries/io_event_irq.c @@ -115,7 +115,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) * by scope or event type alone. For example, Torrent ISR route change * event is reported with scope 0x00 (Not Applicatable) rather than * 0x3B (Torrent-hub). It is better to let the clients to identify - * who owns the the event. + * who owns the event. 
*/ static irqreturn_t ioei_interrupt(int irq, void *dev_id) diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 86ae364900d6..23fc1dcf4434 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -614,6 +614,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) iommu_table_setparms(pci->phb, dn, tbl); pci->iommu_table = iommu_init_table(tbl, pci->phb->node); + iommu_register_group(tbl, pci_domain_nr(bus), 0); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; @@ -658,6 +659,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) ppci->phb->node); iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window); ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); + iommu_register_group(tbl, pci_domain_nr(bus), 0); pr_debug(" created table: %p\n", ppci->iommu_table); } } @@ -684,6 +686,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) phb->node); iommu_table_setparms(phb, dn, tbl); PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node); + iommu_register_group(tbl, pci_domain_nr(phb->bus), 0); set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); return; } @@ -1184,6 +1187,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pci->phb->node); iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window); pci->iommu_table = iommu_init_table(tbl, pci->phb->node); + iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0); pr_debug(" created table: %p\n", pci->iommu_table); } else { pr_debug(" found DMA window, table: %p\n", pci->iommu_table); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 6d62072a7d5a..02d6e21619bb 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -45,6 +45,13 @@ #include "plpar_wrappers.h" #include "pseries.h" +/* Flag bits for H_BULK_REMOVE */ +#define HBR_REQUEST 0x4000000000000000UL +#define HBR_RESPONSE 0x8000000000000000UL +#define HBR_END 0xc000000000000000UL +#define HBR_AVPN 0x0200000000000000UL +#define HBR_ANDCOND 0x0100000000000000UL + /* in hvCall.S */ EXPORT_SYMBOL(plpar_hcall); @@ -64,6 +71,9 @@ void vpa_init(int cpu) if (cpu_has_feature(CPU_FTR_ALTIVEC)) lppaca_of(cpu).vmxregs_in_use = 1; + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + lppaca_of(cpu).ebb_regs_in_use = 1; + addr = __pa(&lppaca_of(cpu)); ret = register_vpa(hwcpu, addr); @@ -240,7 +250,8 @@ static void pSeries_lpar_hptab_clear(void) static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, - int psize, int ssize, int local) + int psize, int apsize, + int ssize, int local) { unsigned long lpar_rc; unsigned long flags = (newpp & 7) | H_AVPN; @@ -328,7 +339,8 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, } static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, - int psize, int ssize, int local) + int psize, int apsize, + int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; @@ -345,6 +357,113 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, BUG_ON(lpar_rc != H_SUCCESS); } +/* + * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need + * to make sure that we avoid bouncing the hypervisor tlbie lock. 
+ */ +#define PPC64_HUGE_HPTE_BATCH 12 + +static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, + unsigned long *vpn, int count, + int psize, int ssize) +{ + unsigned long param[8]; + int i = 0, pix = 0, rc; + unsigned long flags = 0; + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); + + if (lock_tlbie) + spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); + + for (i = 0; i < count; i++) { + + if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { + pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0, + ssize, 0); + } else { + param[pix] = HBR_REQUEST | HBR_AVPN | slot[i]; + param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize); + pix += 2; + if (pix == 8) { + rc = plpar_hcall9(H_BULK_REMOVE, param, + param[0], param[1], param[2], + param[3], param[4], param[5], + param[6], param[7]); + BUG_ON(rc != H_SUCCESS); + pix = 0; + } + } + } + if (pix) { + param[pix] = HBR_END; + rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], + param[2], param[3], param[4], param[5], + param[6], param[7]); + BUG_ON(rc != H_SUCCESS); + } + + if (lock_tlbie) + spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); +} + +static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm, + unsigned char *hpte_slot_array, + unsigned long addr, int psize) +{ + int ssize = 0, i, index = 0; + unsigned long s_addr = addr; + unsigned int max_hpte_count, valid; + unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH]; + unsigned long slot_array[PPC64_HUGE_HPTE_BATCH]; + unsigned long shift, hidx, vpn = 0, vsid, hash, slot; + + shift = mmu_psize_defs[psize].shift; + max_hpte_count = 1U << (PMD_SHIFT - shift); + + for (i = 0; i < max_hpte_count; i++) { + valid = hpte_valid(hpte_slot_array, i); + if (!valid) + continue; + hidx = hpte_hash_index(hpte_slot_array, i); + + /* get the vpn */ + addr = s_addr + (i * (1ul << shift)); + if (!is_kernel_addr(addr)) { + ssize = user_segment_size(addr); + vsid = get_vsid(mm->context.id, addr, ssize); + WARN_ON(vsid == 0); + } else { + vsid = get_kernel_vsid(addr, mmu_kernel_ssize); + ssize = mmu_kernel_ssize; + } + + vpn = hpt_vpn(addr, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + + slot_array[index] = slot; + vpn_array[index] = vpn; + if (index == PPC64_HUGE_HPTE_BATCH - 1) { + /* + * Now do a bluk invalidate + */ + __pSeries_lpar_hugepage_invalidate(slot_array, + vpn_array, + PPC64_HUGE_HPTE_BATCH, + psize, ssize); + index = 0; + } else + index++; + } + if (index) + __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, + index, psize, ssize); +} + static void pSeries_lpar_hpte_removebolted(unsigned long ea, int psize, int ssize) { @@ -356,17 +475,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea, slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); - - pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0); + /* + * lpar doesn't use the passed actual page size + */ + pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0); } -/* Flag bits for H_BULK_REMOVE */ -#define HBR_REQUEST 0x4000000000000000UL -#define HBR_RESPONSE 0x8000000000000000UL -#define HBR_END 0xc000000000000000UL -#define HBR_AVPN 0x0200000000000000UL -#define HBR_ANDCOND 0x0100000000000000UL - /* * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie * lock. 
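Editor's note on the H_BULK_REMOVE packing in __pSeries_lpar_hugepage_invalidate() above: each HPTE to be removed occupies two parameter doublewords, so one plpar_hcall9() flushes up to four entries, and a partially filled batch is closed with HBR_END. A minimal sketch of a two-entry batch (illustrative only, error handling trimmed):

/* Illustrative: pack and flush two HPTEs with a single H_BULK_REMOVE call. */
static long example_bulk_remove_two(unsigned long slot0, unsigned long vpn0,
				    unsigned long slot1, unsigned long vpn1,
				    int psize, int ssize)
{
	unsigned long param[9] = { 0 };

	param[0] = HBR_REQUEST | HBR_AVPN | slot0;
	param[1] = hpte_encode_avpn(vpn0, psize, ssize);
	param[2] = HBR_REQUEST | HBR_AVPN | slot1;
	param[3] = hpte_encode_avpn(vpn1, psize, ssize);
	param[4] = HBR_END;	/* close the batch early: only two entries */

	return plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
			    param[2], param[3], param[4], param[5],
			    param[6], param[7]);
}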
@@ -400,8 +514,11 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { + /* + * lpar doesn't use the passed actual page size + */ pSeries_lpar_hpte_invalidate(slot, vpn, psize, - ssize, local); + 0, ssize, local); } else { param[pix] = HBR_REQUEST | HBR_AVPN | slot; param[pix+1] = hpte_encode_avpn(vpn, psize, @@ -452,6 +569,7 @@ void __init hpte_init_lpar(void) ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; + ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; } #ifdef CONFIG_PPC_SMLPAR diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 8733a86ad52e..9f8671a44551 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -18,6 +18,7 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/kmsg_dump.h> +#include <linux/pstore.h> #include <linux/ctype.h> #include <linux/zlib.h> #include <asm/uaccess.h> @@ -29,6 +30,13 @@ /* Max bytes to read/write in one go */ #define NVRW_CNT 0x20 +/* + * Set oops header version to distingush between old and new format header. + * lnx,oops-log partition max size is 4000, header version > 4000 will + * help in identifying new header. + */ +#define OOPS_HDR_VERSION 5000 + static unsigned int nvram_size; static int nvram_fetch, nvram_store; static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ @@ -45,20 +53,23 @@ struct nvram_os_partition { int min_size; /* minimum acceptable size (0 means req_size) */ long size; /* size of data portion (excluding err_log_info) */ long index; /* offset of data portion of partition */ + bool os_partition; /* partition initialized by OS, not FW */ }; static struct nvram_os_partition rtas_log_partition = { .name = "ibm,rtas-log", .req_size = 2079, .min_size = 1055, - .index = -1 + .index = -1, + .os_partition = true }; static struct nvram_os_partition oops_log_partition = { .name = "lnx,oops-log", .req_size = 4000, .min_size = 2000, - .index = -1 + .index = -1, + .os_partition = true }; static const char *pseries_nvram_os_partitions[] = { @@ -67,6 +78,12 @@ static const char *pseries_nvram_os_partitions[] = { NULL }; +struct oops_log_info { + u16 version; + u16 report_length; + u64 timestamp; +} __attribute__((packed)); + static void oops_to_nvram(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); @@ -83,28 +100,28 @@ static unsigned long last_unread_rtas_event; /* timestamp */ * big_oops_buf[] holds the uncompressed text we're capturing. * - * oops_buf[] holds the compressed text, preceded by a prefix. - * The prefix is just a u16 holding the length of the compressed* text. - * (*Or uncompressed, if compression fails.) oops_buf[] gets written - * to NVRAM. + * oops_buf[] holds the compressed text, preceded by a oops header. + * oops header has u16 holding the version of oops header (to differentiate + * between old and new format header) followed by u16 holding the length of + * the compressed* text (*Or uncompressed, if compression fails.) and u64 + * holding the timestamp. oops_buf[] gets written to NVRAM. * - * oops_len points to the prefix. oops_data points to the compressed text. + * oops_log_info points to the header. oops_data points to the compressed text. 
* * +- oops_buf - * | +- oops_data - * v v - * +------------+-----------------------------------------------+ - * | length | text | - * | (2 bytes) | (oops_data_sz bytes) | - * +------------+-----------------------------------------------+ + * | +- oops_data + * v v + * +-----------+-----------+-----------+------------------------+ + * | version | length | timestamp | text | + * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) | + * +-----------+-----------+-----------+------------------------+ * ^ - * +- oops_len + * +- oops_log_info * * We preallocate these buffers during init to avoid kmalloc during oops/panic. */ static size_t big_oops_buf_sz; static char *big_oops_buf, *oops_buf; -static u16 *oops_len; static char *oops_data; static size_t oops_data_sz; @@ -114,6 +131,30 @@ static size_t oops_data_sz; #define MEM_LEVEL 4 static struct z_stream_s stream; +#ifdef CONFIG_PSTORE +static struct nvram_os_partition of_config_partition = { + .name = "of-config", + .index = -1, + .os_partition = false +}; + +static struct nvram_os_partition common_partition = { + .name = "common", + .index = -1, + .os_partition = false +}; + +static enum pstore_type_id nvram_type_ids[] = { + PSTORE_TYPE_DMESG, + PSTORE_TYPE_PPC_RTAS, + PSTORE_TYPE_PPC_OF, + PSTORE_TYPE_PPC_COMMON, + -1 +}; +static int read_type; +static unsigned long last_rtas_event; +#endif + static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) { unsigned int i; @@ -275,48 +316,72 @@ int nvram_write_error_log(char * buff, int length, { int rc = nvram_write_os_partition(&rtas_log_partition, buff, length, err_type, error_log_cnt); - if (!rc) + if (!rc) { last_unread_rtas_event = get_seconds(); +#ifdef CONFIG_PSTORE + last_rtas_event = get_seconds(); +#endif + } + return rc; } -/* nvram_read_error_log +/* nvram_read_partition * - * Reads nvram for error log for at most 'length' + * Reads nvram partition for at most 'length' */ -int nvram_read_error_log(char * buff, int length, - unsigned int * err_type, unsigned int * error_log_cnt) +int nvram_read_partition(struct nvram_os_partition *part, char *buff, + int length, unsigned int *err_type, + unsigned int *error_log_cnt) { int rc; loff_t tmp_index; struct err_log_info info; - if (rtas_log_partition.index == -1) + if (part->index == -1) return -1; - if (length > rtas_log_partition.size) - length = rtas_log_partition.size; + if (length > part->size) + length = part->size; - tmp_index = rtas_log_partition.index; + tmp_index = part->index; - rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); - if (rc <= 0) { - printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); - return rc; + if (part->os_partition) { + rc = ppc_md.nvram_read((char *)&info, + sizeof(struct err_log_info), + &tmp_index); + if (rc <= 0) { + pr_err("%s: Failed nvram_read (%d)\n", __FUNCTION__, + rc); + return rc; + } } rc = ppc_md.nvram_read(buff, length, &tmp_index); if (rc <= 0) { - printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); + pr_err("%s: Failed nvram_read (%d)\n", __FUNCTION__, rc); return rc; } - *error_log_cnt = info.seq_num; - *err_type = info.error_type; + if (part->os_partition) { + *error_log_cnt = info.seq_num; + *err_type = info.error_type; + } return 0; } +/* nvram_read_error_log + * + * Reads nvram for error log for at most 'length' + */ +int nvram_read_error_log(char *buff, int length, + unsigned int *err_type, unsigned int *error_log_cnt) +{ + return nvram_read_partition(&rtas_log_partition, buff, length, + err_type, 
error_log_cnt); +} + /* This doesn't actually zero anything, but it sets the event_logged * word to tell that this event is safely in syslog. */ @@ -405,6 +470,349 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition return 0; } +/* + * Are we using the ibm,rtas-log for oops/panic reports? And if so, + * would logging this oops/panic overwrite an RTAS event that rtas_errd + * hasn't had a chance to read and process? Return 1 if so, else 0. + * + * We assume that if rtas_errd hasn't read the RTAS event in + * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. + */ +static int clobbering_unread_rtas_event(void) +{ + return (oops_log_partition.index == rtas_log_partition.index + && last_unread_rtas_event + && get_seconds() - last_unread_rtas_event <= + NVRAM_RTAS_READ_TIMEOUT); +} + +/* Derived from logfs_compress() */ +static int nvram_compress(const void *in, void *out, size_t inlen, + size_t outlen) +{ + int err, ret; + + ret = -EIO; + err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS, + MEM_LEVEL, Z_DEFAULT_STRATEGY); + if (err != Z_OK) + goto error; + + stream.next_in = in; + stream.avail_in = inlen; + stream.total_in = 0; + stream.next_out = out; + stream.avail_out = outlen; + stream.total_out = 0; + + err = zlib_deflate(&stream, Z_FINISH); + if (err != Z_STREAM_END) + goto error; + + err = zlib_deflateEnd(&stream); + if (err != Z_OK) + goto error; + + if (stream.total_out >= stream.total_in) + goto error; + + ret = stream.total_out; +error: + return ret; +} + +/* Compress the text from big_oops_buf into oops_buf. */ +static int zip_oops(size_t text_len) +{ + struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; + int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len, + oops_data_sz); + if (zipped_len < 0) { + pr_err("nvram: compression failed; returned %d\n", zipped_len); + pr_err("nvram: logging uncompressed oops/panic report\n"); + return -1; + } + oops_hdr->version = OOPS_HDR_VERSION; + oops_hdr->report_length = (u16) zipped_len; + oops_hdr->timestamp = get_seconds(); + return 0; +} + +#ifdef CONFIG_PSTORE +/* Derived from logfs_uncompress */ +int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen) +{ + int err, ret; + + ret = -EIO; + err = zlib_inflateInit(&stream); + if (err != Z_OK) + goto error; + + stream.next_in = in; + stream.avail_in = inlen; + stream.total_in = 0; + stream.next_out = out; + stream.avail_out = outlen; + stream.total_out = 0; + + err = zlib_inflate(&stream, Z_FINISH); + if (err != Z_STREAM_END) + goto error; + + err = zlib_inflateEnd(&stream); + if (err != Z_OK) + goto error; + + ret = stream.total_out; +error: + return ret; +} + +static int unzip_oops(char *oops_buf, char *big_buf) +{ + struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; + u64 timestamp = oops_hdr->timestamp; + char *big_oops_data = NULL; + char *oops_data_buf = NULL; + size_t big_oops_data_sz; + int unzipped_len; + + big_oops_data = big_buf + sizeof(struct oops_log_info); + big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info); + oops_data_buf = oops_buf + sizeof(struct oops_log_info); + + unzipped_len = nvram_decompress(oops_data_buf, big_oops_data, + oops_hdr->report_length, + big_oops_data_sz); + + if (unzipped_len < 0) { + pr_err("nvram: decompression failed; returned %d\n", + unzipped_len); + return -1; + } + oops_hdr = (struct oops_log_info *)big_buf; + oops_hdr->version = OOPS_HDR_VERSION; + oops_hdr->report_length = (u16) unzipped_len; + oops_hdr->timestamp = 
timestamp; + return 0; +} + +static int nvram_pstore_open(struct pstore_info *psi) +{ + /* Reset the iterator to start reading partitions again */ + read_type = -1; + return 0; +} + +/** + * nvram_pstore_write - pstore write callback for nvram + * @type: Type of message logged + * @reason: reason behind dump (oops/panic) + * @id: identifier to indicate the write performed + * @part: pstore writes data to registered buffer in parts, + * part number will indicate the same. + * @count: Indicates oops count + * @hsize: Size of header added by pstore + * @size: number of bytes written to the registered buffer + * @psi: registered pstore_info structure + * + * Called by pstore_dump() when an oops or panic report is logged in the + * printk buffer. + * Returns 0 on successful write. + */ +static int nvram_pstore_write(enum pstore_type_id type, + enum kmsg_dump_reason reason, + u64 *id, unsigned int part, int count, + size_t hsize, size_t size, + struct pstore_info *psi) +{ + int rc; + unsigned int err_type = ERR_TYPE_KERNEL_PANIC; + struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf; + + /* part 1 has the recent messages from printk buffer */ + if (part > 1 || type != PSTORE_TYPE_DMESG || + clobbering_unread_rtas_event()) + return -1; + + oops_hdr->version = OOPS_HDR_VERSION; + oops_hdr->report_length = (u16) size; + oops_hdr->timestamp = get_seconds(); + + if (big_oops_buf) { + rc = zip_oops(size); + /* + * If compression fails copy recent log messages from + * big_oops_buf to oops_data. + */ + if (rc != 0) { + size_t diff = size - oops_data_sz + hsize; + + if (size > oops_data_sz) { + memcpy(oops_data, big_oops_buf, hsize); + memcpy(oops_data + hsize, big_oops_buf + diff, + oops_data_sz - hsize); + + oops_hdr->report_length = (u16) oops_data_sz; + } else + memcpy(oops_data, big_oops_buf, size); + } else + err_type = ERR_TYPE_KERNEL_PANIC_GZ; + } + + rc = nvram_write_os_partition(&oops_log_partition, oops_buf, + (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, + count); + + if (rc != 0) + return rc; + + *id = part; + return 0; +} + +/* + * Reads the oops/panic report, rtas, of-config and common partition. + * Returns the length of the data we read from each partition. + * Returns 0 if we've been called before. 
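The nvram_compress()/nvram_decompress() pair above is a thin wrapper around zlib's deflate and inflate with a small window and memory level, and zip_oops()/unzip_oops() bracket the compressed text with the oops_log_info header. A minimal user-space sketch of the same round trip, using stock zlib rather than the kernel's zlib_* interface; the header fields, buffer sizes and compression parameters here are illustrative stand-ins, not the driver's exact values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <time.h>
#include <zlib.h>

/* Stand-in for struct oops_log_info: a small header ahead of compressed text. */
struct oops_hdr {
	uint16_t version;
	uint16_t report_length;
	uint64_t timestamp;
};

/* Deflate in[0..inlen) into out[0..outlen); returns compressed size or -1. */
static int sketch_compress(const void *in, void *out, size_t inlen, size_t outlen)
{
	z_stream s;
	int total;

	memset(&s, 0, sizeof(s));
	/* Small window (12 bits) and memLevel 4 keep the workspace modest,
	 * in the spirit of the driver's WINDOW_BITS/MEM_LEVEL choice. */
	if (deflateInit2(&s, Z_BEST_COMPRESSION, Z_DEFLATED, 12, 4,
			 Z_DEFAULT_STRATEGY) != Z_OK)
		return -1;
	s.next_in = (Bytef *)in;
	s.avail_in = (uInt)inlen;
	s.next_out = (Bytef *)out;
	s.avail_out = (uInt)outlen;
	if (deflate(&s, Z_FINISH) != Z_STREAM_END) {
		deflateEnd(&s);
		return -1;
	}
	total = (int)s.total_out;
	deflateEnd(&s);
	return total;
}

/* Inflate the compressed record back into plain text; returns size or -1. */
static int sketch_decompress(const void *in, void *out, size_t inlen, size_t outlen)
{
	z_stream s;
	int total;

	memset(&s, 0, sizeof(s));
	if (inflateInit(&s) != Z_OK)
		return -1;
	s.next_in = (Bytef *)in;
	s.avail_in = (uInt)inlen;
	s.next_out = (Bytef *)out;
	s.avail_out = (uInt)outlen;
	if (inflate(&s, Z_FINISH) != Z_STREAM_END) {
		inflateEnd(&s);
		return -1;
	}
	total = (int)s.total_out;
	inflateEnd(&s);
	return total;
}

int main(void)
{
	const char *report = "Oops: example kernel panic text for the sketch";
	unsigned char nvram[256];          /* pretend NVRAM record */
	char plain[256];
	struct oops_hdr hdr;
	int zlen, plen;

	zlen = sketch_compress(report, nvram + sizeof(hdr),
			       strlen(report) + 1, sizeof(nvram) - sizeof(hdr));
	if (zlen < 0)
		return 1;

	hdr.version = 1;                   /* OOPS_HDR_VERSION stand-in */
	hdr.report_length = (uint16_t)zlen;
	hdr.timestamp = (uint64_t)time(NULL);
	memcpy(nvram, &hdr, sizeof(hdr));  /* header precedes the text */

	plen = sketch_decompress(nvram + sizeof(hdr), plain,
				 hdr.report_length, sizeof(plain));
	printf("compressed %zu -> %d bytes, restored %d bytes\n",
	       strlen(report) + 1, zlen, plen);
	return plen > 0 ? 0 : 1;
}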
+ */ +static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, + int *count, struct timespec *time, char **buf, + struct pstore_info *psi) +{ + struct oops_log_info *oops_hdr; + unsigned int err_type, id_no, size = 0; + struct nvram_os_partition *part = NULL; + char *buff = NULL, *big_buff = NULL; + int rc, sig = 0; + loff_t p; + +read_partition: + read_type++; + + switch (nvram_type_ids[read_type]) { + case PSTORE_TYPE_DMESG: + part = &oops_log_partition; + *type = PSTORE_TYPE_DMESG; + break; + case PSTORE_TYPE_PPC_RTAS: + part = &rtas_log_partition; + *type = PSTORE_TYPE_PPC_RTAS; + time->tv_sec = last_rtas_event; + time->tv_nsec = 0; + break; + case PSTORE_TYPE_PPC_OF: + sig = NVRAM_SIG_OF; + part = &of_config_partition; + *type = PSTORE_TYPE_PPC_OF; + *id = PSTORE_TYPE_PPC_OF; + time->tv_sec = 0; + time->tv_nsec = 0; + break; + case PSTORE_TYPE_PPC_COMMON: + sig = NVRAM_SIG_SYS; + part = &common_partition; + *type = PSTORE_TYPE_PPC_COMMON; + *id = PSTORE_TYPE_PPC_COMMON; + time->tv_sec = 0; + time->tv_nsec = 0; + break; + default: + return 0; + } + + if (!part->os_partition) { + p = nvram_find_partition(part->name, sig, &size); + if (p <= 0) { + pr_err("nvram: Failed to find partition %s, " + "err %d\n", part->name, (int)p); + return 0; + } + part->index = p; + part->size = size; + } + + buff = kmalloc(part->size, GFP_KERNEL); + + if (!buff) + return -ENOMEM; + + if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) { + kfree(buff); + return 0; + } + + *count = 0; + + if (part->os_partition) + *id = id_no; + + if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { + oops_hdr = (struct oops_log_info *)buff; + *buf = buff + sizeof(*oops_hdr); + + if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { + big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); + if (!big_buff) + return -ENOMEM; + + rc = unzip_oops(buff, big_buff); + + if (rc != 0) { + kfree(buff); + kfree(big_buff); + goto read_partition; + } + + oops_hdr = (struct oops_log_info *)big_buff; + *buf = big_buff + sizeof(*oops_hdr); + kfree(buff); + } + + time->tv_sec = oops_hdr->timestamp; + time->tv_nsec = 0; + return oops_hdr->report_length; + } + + *buf = buff; + return part->size; +} + +static struct pstore_info nvram_pstore_info = { + .owner = THIS_MODULE, + .name = "nvram", + .open = nvram_pstore_open, + .read = nvram_pstore_read, + .write = nvram_pstore_write, +}; + +static int nvram_pstore_init(void) +{ + int rc = 0; + + if (big_oops_buf) { + nvram_pstore_info.buf = big_oops_buf; + nvram_pstore_info.bufsize = big_oops_buf_sz; + } else { + nvram_pstore_info.buf = oops_data; + nvram_pstore_info.bufsize = oops_data_sz; + } + + rc = pstore_register(&nvram_pstore_info); + if (rc != 0) + pr_err("nvram: pstore_register() failed, defaults to " + "kmsg_dump; returned %d\n", rc); + + return rc; +} +#else +static int nvram_pstore_init(void) +{ + return -1; +} +#endif + static void __init nvram_init_oops_partition(int rtas_partition_exists) { int rc; @@ -425,9 +833,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) oops_log_partition.name); return; } - oops_len = (u16*) oops_buf; - oops_data = oops_buf + sizeof(u16); - oops_data_sz = oops_log_partition.size - sizeof(u16); + oops_data = oops_buf + sizeof(struct oops_log_info); + oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); /* * Figure compression (preceded by elimination of each line's <n> @@ -452,6 +859,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) stream.workspace = NULL; } + rc = 
nvram_pstore_init(); + + if (!rc) + return; + rc = kmsg_dump_register(&nvram_kmsg_dumper); if (rc != 0) { pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); @@ -501,70 +913,6 @@ int __init pSeries_nvram_init(void) return 0; } -/* - * Are we using the ibm,rtas-log for oops/panic reports? And if so, - * would logging this oops/panic overwrite an RTAS event that rtas_errd - * hasn't had a chance to read and process? Return 1 if so, else 0. - * - * We assume that if rtas_errd hasn't read the RTAS event in - * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. - */ -static int clobbering_unread_rtas_event(void) -{ - return (oops_log_partition.index == rtas_log_partition.index - && last_unread_rtas_event - && get_seconds() - last_unread_rtas_event <= - NVRAM_RTAS_READ_TIMEOUT); -} - -/* Derived from logfs_compress() */ -static int nvram_compress(const void *in, void *out, size_t inlen, - size_t outlen) -{ - int err, ret; - - ret = -EIO; - err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS, - MEM_LEVEL, Z_DEFAULT_STRATEGY); - if (err != Z_OK) - goto error; - - stream.next_in = in; - stream.avail_in = inlen; - stream.total_in = 0; - stream.next_out = out; - stream.avail_out = outlen; - stream.total_out = 0; - - err = zlib_deflate(&stream, Z_FINISH); - if (err != Z_STREAM_END) - goto error; - - err = zlib_deflateEnd(&stream); - if (err != Z_OK) - goto error; - - if (stream.total_out >= stream.total_in) - goto error; - - ret = stream.total_out; -error: - return ret; -} - -/* Compress the text from big_oops_buf into oops_buf. */ -static int zip_oops(size_t text_len) -{ - int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len, - oops_data_sz); - if (zipped_len < 0) { - pr_err("nvram: compression failed; returned %d\n", zipped_len); - pr_err("nvram: logging uncompressed oops/panic report\n"); - return -1; - } - *oops_len = (u16) zipped_len; - return 0; -} /* * This is our kmsg_dump callback, called after an oops or panic report @@ -576,6 +924,7 @@ static int zip_oops(size_t text_len) static void oops_to_nvram(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { + struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; static unsigned int oops_count = 0; static bool panicking = false; static DEFINE_SPINLOCK(lock); @@ -619,14 +968,17 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, } if (rc != 0) { kmsg_dump_rewind(dumper); - kmsg_dump_get_buffer(dumper, true, + kmsg_dump_get_buffer(dumper, false, oops_data, oops_data_sz, &text_len); err_type = ERR_TYPE_KERNEL_PANIC; - *oops_len = (u16) text_len; + oops_hdr->version = OOPS_HDR_VERSION; + oops_hdr->report_length = (u16) text_len; + oops_hdr->timestamp = get_seconds(); } (void) nvram_write_os_partition(&oops_log_partition, oops_buf, - (int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count); + (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, + ++oops_count); spin_unlock_irqrestore(&lock, flags); } diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index c91b22be9288..efe61374f6ea 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -64,91 +64,6 @@ pcibios_find_pci_bus(struct device_node *dn) } EXPORT_SYMBOL_GPL(pcibios_find_pci_bus); -/** - * __pcibios_remove_pci_devices - remove all devices under this bus - * @bus: the indicated PCI bus - * @purge_pe: destroy the PE on removal of PCI devices - * - * Remove all of the PCI devices under this bus both from 
the - * linux pci device tree, and from the powerpc EEH address cache. - * By default, the corresponding PE will be destroied during the - * normal PCI hotplug path. For PCI hotplug during EEH recovery, - * the corresponding PE won't be destroied and deallocated. - */ -void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe) -{ - struct pci_dev *dev, *tmp; - struct pci_bus *child_bus; - - /* First go down child busses */ - list_for_each_entry(child_bus, &bus->children, node) - __pcibios_remove_pci_devices(child_bus, purge_pe); - - pr_debug("PCI: Removing devices on bus %04x:%02x\n", - pci_domain_nr(bus), bus->number); - list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { - pr_debug(" * Removing %s...\n", pci_name(dev)); - eeh_remove_bus_device(dev, purge_pe); - pci_stop_and_remove_bus_device(dev); - } -} - -/** - * pcibios_remove_pci_devices - remove all devices under this bus - * - * Remove all of the PCI devices under this bus both from the - * linux pci device tree, and from the powerpc EEH address cache. - */ -void pcibios_remove_pci_devices(struct pci_bus *bus) -{ - __pcibios_remove_pci_devices(bus, 1); -} -EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); - -/** - * pcibios_add_pci_devices - adds new pci devices to bus - * - * This routine will find and fixup new pci devices under - * the indicated bus. This routine presumes that there - * might already be some devices under this bridge, so - * it carefully tries to add only new devices. (And that - * is how this routine differs from other, similar pcibios - * routines.) - */ -void pcibios_add_pci_devices(struct pci_bus * bus) -{ - int slotno, num, mode, pass, max; - struct pci_dev *dev; - struct device_node *dn = pci_bus_to_OF_node(bus); - - eeh_add_device_tree_early(dn); - - mode = PCI_PROBE_NORMAL; - if (ppc_md.pci_probe_mode) - mode = ppc_md.pci_probe_mode(bus); - - if (mode == PCI_PROBE_DEVTREE) { - /* use ofdt-based probe */ - of_rescan_bus(dn, bus); - } else if (mode == PCI_PROBE_NORMAL) { - /* use legacy probe */ - slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); - num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); - if (!num) - return; - pcibios_setup_bus_devices(bus); - max = bus->busn_res.start; - for (pass=0; pass < 2; pass++) - list_for_each_entry(dev, &bus->devices, bus_list) { - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) - max = pci_scan_bridge(bus, dev, max, pass); - } - } - pcibios_finish_adding_to_bus(bus); -} -EXPORT_SYMBOL_GPL(pcibios_add_pci_devices); - struct pci_controller *init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index c4dfccd3a3d9..7b3cbde8c783 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -83,7 +83,7 @@ static void handle_system_shutdown(char event_modifier) switch (event_modifier) { case EPOW_SHUTDOWN_NORMAL: pr_emerg("Firmware initiated power off"); - orderly_poweroff(1); + orderly_poweroff(true); break; case EPOW_SHUTDOWN_ON_UPS: @@ -95,13 +95,13 @@ static void handle_system_shutdown(char event_modifier) pr_emerg("Loss of system critical functions reported by " "firmware"); pr_emerg("Check RTAS error log for details"); - orderly_poweroff(1); + orderly_poweroff(true); break; case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH: pr_emerg("Ambient temperature too high reported by firmware"); pr_emerg("Check RTAS error log for details"); - orderly_poweroff(1); + orderly_poweroff(true); 
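Stepping back to nvram_pstore_write() earlier in the nvram.c hunk: when compression fails and the report is larger than oops_data_sz, it keeps the hsize bytes of pstore header and the newest tail of the log text. That arithmetic in a stand-alone sketch, with header and buffer sizes made up for the example:

#include <stdio.h>
#include <string.h>

#define HSIZE        16   /* pretend pstore header already at the front of the record */
#define OOPS_DATA_SZ 64   /* illustrative NVRAM capacity, not the real value */

/*
 * Copy src (hsize bytes of header plus log text, size bytes total) into a
 * dst buffer of 'capacity' bytes, preserving the header and the newest
 * part of the text when it does not all fit, the same arithmetic as the
 * compression-failure fallback in nvram_pstore_write().
 */
static size_t keep_header_and_tail(char *dst, size_t capacity,
				   const char *src, size_t size, size_t hsize)
{
	if (size <= capacity) {
		memcpy(dst, src, size);
		return size;
	}

	/* diff = how much of the oldest text has to be dropped */
	size_t diff = size - capacity + hsize;

	memcpy(dst, src, hsize);                            /* keep the header */
	memcpy(dst + hsize, src + diff, capacity - hsize);  /* keep newest tail */
	return capacity;
}

int main(void)
{
	char record[128];
	char nvram[OOPS_DATA_SZ];
	size_t written;

	memset(record, 'H', HSIZE);                 /* fake pstore header */
	for (size_t i = HSIZE; i < sizeof(record); i++)
		record[i] = 'a' + (i % 26);         /* fake log text */

	written = keep_header_and_tail(nvram, sizeof(nvram),
				       record, sizeof(record), HSIZE);
	printf("stored %zu of %zu bytes (header + newest tail)\n",
	       written, sizeof(record));
	return 0;
}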
break; default: @@ -162,7 +162,7 @@ void rtas_parse_epow_errlog(struct rtas_error_log *log) case EPOW_SYSTEM_HALT: pr_emerg("Firmware initiated power off"); - orderly_poweroff(1); + orderly_poweroff(true); break; case EPOW_MAIN_ENCLOSURE: diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 12bc8c3663ad..306643cc9dbc 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -192,7 +192,7 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) /* Special case - we inhibit secondary thread startup * during boot if the user requests it. */ - if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) { + if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) { if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) return 0; if (smt_enabled_at_boot diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl index 7f5b83808862..3f46e8b9c56d 100755 --- a/arch/powerpc/relocs_check.pl +++ b/arch/powerpc/relocs_check.pl @@ -7,7 +7,7 @@ # as published by the Free Software Foundation; either version # 2 of the License, or (at your option) any later version. -# This script checks the relcoations of a vmlinux for "suspicious" +# This script checks the relocations of a vmlinux for "suspicious" # relocations. use strict; @@ -28,7 +28,7 @@ open(FD, "$objdump -R $vmlinux|") or die; while (<FD>) { study $_; - # Only look at relcoation lines. + # Only look at relocation lines. next if (!/\s+R_/); # These relocations are okay @@ -45,7 +45,7 @@ while (<FD>) { /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or /\bR_PPC_NONE\b/); - # If we see this type of relcoation it's an idication that + # If we see this type of relocation it's an idication that # we /may/ be using an old version of binutils. if (/R_PPC64_UADDR64/) { $old_binutils++; @@ -61,6 +61,6 @@ if ($bad_relocs_count) { } if ($old_binutils) { - print "WARNING: You need at binutils >= 2.19 to build a ". - "CONFIG_RELCOATABLE kernel\n"; + print "WARNING: You need at least binutils >= 2.19 to build a ". + "CONFIG_RELOCATABLE kernel\n"; } diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 99464a7bdb3b..f67ac900d870 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -4,6 +4,8 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) +obj-$(CONFIG_MPIC_TIMER) += mpic_timer.o +obj-$(CONFIG_FSL_MPIC_TIMER_WAKEUP) += fsl_mpic_timer_wakeup.o mpic-msgr-obj-$(CONFIG_MPIC_MSGR) += mpic_msgr.o obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) $(mpic-msgr-obj-y) obj-$(CONFIG_PPC_EPAPR_HV_PIC) += ehv_pic.o diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index d4fa03f2b6ac..5e6ff38ea69f 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -120,6 +120,7 @@ static irqreturn_t cpm_error_interrupt(int irq, void *dev) static struct irqaction cpm_error_irqaction = { .handler = cpm_error_interrupt, + .flags = IRQF_NO_THREAD, .name = "error", }; diff --git a/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c b/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c new file mode 100644 index 000000000000..1707bf04dec6 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c @@ -0,0 +1,161 @@ +/* + * MPIC timer wakeup driver + * + * Copyright 2013 Freescale Semiconductor, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/device.h> + +#include <asm/mpic_timer.h> +#include <asm/mpic.h> + +struct fsl_mpic_timer_wakeup { + struct mpic_timer *timer; + struct work_struct free_work; +}; + +static struct fsl_mpic_timer_wakeup *fsl_wakeup; +static DEFINE_MUTEX(sysfs_lock); + +static void fsl_free_resource(struct work_struct *ws) +{ + struct fsl_mpic_timer_wakeup *wakeup = + container_of(ws, struct fsl_mpic_timer_wakeup, free_work); + + mutex_lock(&sysfs_lock); + + if (wakeup->timer) { + disable_irq_wake(wakeup->timer->irq); + mpic_free_timer(wakeup->timer); + } + + wakeup->timer = NULL; + mutex_unlock(&sysfs_lock); +} + +static irqreturn_t fsl_mpic_timer_irq(int irq, void *dev_id) +{ + struct fsl_mpic_timer_wakeup *wakeup = dev_id; + + schedule_work(&wakeup->free_work); + + return wakeup->timer ? IRQ_HANDLED : IRQ_NONE; +} + +static ssize_t fsl_timer_wakeup_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct timeval interval; + int val = 0; + + mutex_lock(&sysfs_lock); + if (fsl_wakeup->timer) { + mpic_get_remain_time(fsl_wakeup->timer, &interval); + val = interval.tv_sec + 1; + } + mutex_unlock(&sysfs_lock); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t fsl_timer_wakeup_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct timeval interval; + int ret; + + interval.tv_usec = 0; + if (kstrtol(buf, 0, &interval.tv_sec)) + return -EINVAL; + + mutex_lock(&sysfs_lock); + + if (fsl_wakeup->timer) { + disable_irq_wake(fsl_wakeup->timer->irq); + mpic_free_timer(fsl_wakeup->timer); + fsl_wakeup->timer = NULL; + } + + if (!interval.tv_sec) { + mutex_unlock(&sysfs_lock); + return count; + } + + fsl_wakeup->timer = mpic_request_timer(fsl_mpic_timer_irq, + fsl_wakeup, &interval); + if (!fsl_wakeup->timer) { + mutex_unlock(&sysfs_lock); + return -EINVAL; + } + + ret = enable_irq_wake(fsl_wakeup->timer->irq); + if (ret) { + mpic_free_timer(fsl_wakeup->timer); + fsl_wakeup->timer = NULL; + mutex_unlock(&sysfs_lock); + + return ret; + } + + mpic_start_timer(fsl_wakeup->timer); + + mutex_unlock(&sysfs_lock); + + return count; +} + +static struct device_attribute mpic_attributes = __ATTR(timer_wakeup, 0644, + fsl_timer_wakeup_show, fsl_timer_wakeup_store); + +static int __init fsl_wakeup_sys_init(void) +{ + int ret; + + fsl_wakeup = kzalloc(sizeof(struct fsl_mpic_timer_wakeup), GFP_KERNEL); + if (!fsl_wakeup) + return -ENOMEM; + + INIT_WORK(&fsl_wakeup->free_work, fsl_free_resource); + + ret = device_create_file(mpic_subsys.dev_root, &mpic_attributes); + if (ret) + kfree(fsl_wakeup); + + return ret; +} + +static void __exit fsl_wakeup_sys_exit(void) +{ + device_remove_file(mpic_subsys.dev_root, &mpic_attributes); + + mutex_lock(&sysfs_lock); + + if (fsl_wakeup->timer) { + disable_irq_wake(fsl_wakeup->timer->irq); + mpic_free_timer(fsl_wakeup->timer); + } + + kfree(fsl_wakeup); + + mutex_unlock(&sysfs_lock); +} + +module_init(fsl_wakeup_sys_init); +module_exit(fsl_wakeup_sys_exit); + +MODULE_DESCRIPTION("Freescale MPIC global timer wakeup driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Wang Dongsheng 
<dongsheng.wang@freescale.com>"); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 3cc2f9159ab1..1be54faf60dd 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -48,6 +48,12 @@ #define DBG(fmt...) #endif +struct bus_type mpic_subsys = { + .name = "mpic", + .dev_name = "mpic", +}; +EXPORT_SYMBOL_GPL(mpic_subsys); + static struct mpic *mpics; static struct mpic *mpic_primary; static DEFINE_RAW_SPINLOCK(mpic_lock); @@ -920,6 +926,22 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) return IRQ_SET_MASK_OK_NOCOPY; } +static int mpic_irq_set_wake(struct irq_data *d, unsigned int on) +{ + struct irq_desc *desc = container_of(d, struct irq_desc, irq_data); + struct mpic *mpic = mpic_from_irq_data(d); + + if (!(mpic->flags & MPIC_FSL)) + return -ENXIO; + + if (on) + desc->action->flags |= IRQF_NO_SUSPEND; + else + desc->action->flags &= ~IRQF_NO_SUSPEND; + + return 0; +} + void mpic_set_vector(unsigned int virq, unsigned int vector) { struct mpic *mpic = mpic_from_irq(virq); @@ -957,6 +979,7 @@ static struct irq_chip mpic_irq_chip = { .irq_unmask = mpic_unmask_irq, .irq_eoi = mpic_end_irq, .irq_set_type = mpic_set_irq_type, + .irq_set_wake = mpic_irq_set_wake, }; #ifdef CONFIG_SMP @@ -971,6 +994,7 @@ static struct irq_chip mpic_tm_chip = { .irq_mask = mpic_mask_tm, .irq_unmask = mpic_unmask_tm, .irq_eoi = mpic_end_irq, + .irq_set_wake = mpic_irq_set_wake, }; #ifdef CONFIG_MPIC_U3_HT_IRQS @@ -1173,10 +1197,33 @@ static struct irq_domain_ops mpic_host_ops = { .xlate = mpic_host_xlate, }; +static u32 fsl_mpic_get_version(struct mpic *mpic) +{ + u32 brr1; + + if (!(mpic->flags & MPIC_FSL)) + return 0; + + brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs, + MPIC_FSL_BRR1); + + return brr1 & MPIC_FSL_BRR1_VER; +} + /* * Exported functions */ +u32 fsl_mpic_primary_get_version(void) +{ + struct mpic *mpic = mpic_primary; + + if (mpic) + return fsl_mpic_get_version(mpic); + + return 0; +} + struct mpic * __init mpic_alloc(struct device_node *node, phys_addr_t phys_addr, unsigned int flags, @@ -1323,7 +1370,6 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); if (mpic->flags & MPIC_FSL) { - u32 brr1; int ret; /* @@ -1334,9 +1380,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs, MPIC_CPU_THISBASE, 0x1000); - brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs, - MPIC_FSL_BRR1); - fsl_version = brr1 & MPIC_FSL_BRR1_VER; + fsl_version = fsl_mpic_get_version(mpic); /* Error interrupt mask register (EIMR) is required for * handling individual device error interrupts. EIMR @@ -1526,9 +1570,7 @@ void __init mpic_init(struct mpic *mpic) mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); if (mpic->flags & MPIC_FSL) { - u32 brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs, - MPIC_FSL_BRR1); - u32 version = brr1 & MPIC_FSL_BRR1_VER; + u32 version = fsl_mpic_get_version(mpic); /* * Timer group B is present at the latest in MPIC 3.1 (e.g. 
@@ -1999,6 +2041,8 @@ static struct syscore_ops mpic_syscore_ops = { static int mpic_init_sys(void) { register_syscore_ops(&mpic_syscore_ops); + subsys_system_register(&mpic_subsys, NULL); + return 0; } diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c new file mode 100644 index 000000000000..c06db92a4fb1 --- /dev/null +++ b/arch/powerpc/sysdev/mpic_timer.c @@ -0,0 +1,593 @@ +/* + * MPIC timer driver + * + * Copyright 2013 Freescale Semiconductor, Inc. + * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com> + * Li Yang <leoli@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/syscore_ops.h> +#include <sysdev/fsl_soc.h> +#include <asm/io.h> + +#include <asm/mpic_timer.h> + +#define FSL_GLOBAL_TIMER 0x1 + +/* Clock Ratio + * Divide by 64 0x00000300 + * Divide by 32 0x00000200 + * Divide by 16 0x00000100 + * Divide by 8 0x00000000 (Hardware default div) + */ +#define MPIC_TIMER_TCR_CLKDIV 0x00000300 + +#define MPIC_TIMER_TCR_ROVR_OFFSET 24 + +#define TIMER_STOP 0x80000000 +#define TIMERS_PER_GROUP 4 +#define MAX_TICKS (~0U >> 1) +#define MAX_TICKS_CASCADE (~0U) +#define TIMER_OFFSET(num) (1 << (TIMERS_PER_GROUP - 1 - num)) + +/* tv_usec should be less than ONE_SECOND, otherwise use tv_sec */ +#define ONE_SECOND 1000000 + +struct timer_regs { + u32 gtccr; + u32 res0[3]; + u32 gtbcr; + u32 res1[3]; + u32 gtvpr; + u32 res2[3]; + u32 gtdr; + u32 res3[3]; +}; + +struct cascade_priv { + u32 tcr_value; /* TCR register: CASC & ROVR value */ + unsigned int cascade_map; /* cascade map */ + unsigned int timer_num; /* cascade control timer */ +}; + +struct timer_group_priv { + struct timer_regs __iomem *regs; + struct mpic_timer timer[TIMERS_PER_GROUP]; + struct list_head node; + unsigned int timerfreq; + unsigned int idle; + unsigned int flags; + spinlock_t lock; + void __iomem *group_tcr; +}; + +static struct cascade_priv cascade_timer[] = { + /* cascade timer 0 and 1 */ + {0x1, 0xc, 0x1}, + /* cascade timer 1 and 2 */ + {0x2, 0x6, 0x2}, + /* cascade timer 2 and 3 */ + {0x4, 0x3, 0x3} +}; + +static LIST_HEAD(timer_group_list); + +static void convert_ticks_to_time(struct timer_group_priv *priv, + const u64 ticks, struct timeval *time) +{ + u64 tmp_sec; + + time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq); + tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq; + + time->tv_usec = (__kernel_suseconds_t) + div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq); + + return; +} + +/* the time set by the user is converted to "ticks" */ +static int convert_time_to_ticks(struct timer_group_priv *priv, + const struct timeval *time, u64 *ticks) +{ + u64 max_value; /* prevent u64 overflow */ + u64 tmp = 0; + + u64 tmp_sec; + u64 tmp_ms; + u64 tmp_us; + + max_value = div_u64(ULLONG_MAX, priv->timerfreq); + + if (time->tv_sec > max_value || + (time->tv_sec == max_value && time->tv_usec > 0)) + return -EINVAL; + + tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq; + tmp += tmp_sec; + + tmp_ms = time->tv_usec / 1000; + tmp_ms = div_u64((u64)tmp_ms * (u64)priv->timerfreq, 1000); + tmp += tmp_ms; + 
+ tmp_us = time->tv_usec % 1000; + tmp_us = div_u64((u64)tmp_us * (u64)priv->timerfreq, 1000000); + tmp += tmp_us; + + *ticks = tmp; + + return 0; +} + +/* detect whether there is a cascade timer available */ +static struct mpic_timer *detect_idle_cascade_timer( + struct timer_group_priv *priv) +{ + struct cascade_priv *casc_priv; + unsigned int map; + unsigned int array_size = ARRAY_SIZE(cascade_timer); + unsigned int num; + unsigned int i; + unsigned long flags; + + casc_priv = cascade_timer; + for (i = 0; i < array_size; i++) { + spin_lock_irqsave(&priv->lock, flags); + map = casc_priv->cascade_map & priv->idle; + if (map == casc_priv->cascade_map) { + num = casc_priv->timer_num; + priv->timer[num].cascade_handle = casc_priv; + + /* set timer busy */ + priv->idle &= ~casc_priv->cascade_map; + spin_unlock_irqrestore(&priv->lock, flags); + return &priv->timer[num]; + } + spin_unlock_irqrestore(&priv->lock, flags); + casc_priv++; + } + + return NULL; +} + +static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks, + unsigned int num) +{ + struct cascade_priv *casc_priv; + u32 tcr; + u32 tmp_ticks; + u32 rem_ticks; + + /* set group tcr reg for cascade */ + casc_priv = priv->timer[num].cascade_handle; + if (!casc_priv) + return -EINVAL; + + tcr = casc_priv->tcr_value | + (casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET); + setbits32(priv->group_tcr, tcr); + + tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks); + + out_be32(&priv->regs[num].gtccr, 0); + out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP); + + out_be32(&priv->regs[num - 1].gtccr, 0); + out_be32(&priv->regs[num - 1].gtbcr, rem_ticks); + + return 0; +} + +static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv, + u64 ticks) +{ + struct mpic_timer *allocated_timer; + + /* Two cascade timers: Support the maximum time */ + const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE; + int ret; + + if (ticks > max_ticks) + return NULL; + + /* detect idle timer */ + allocated_timer = detect_idle_cascade_timer(priv); + if (!allocated_timer) + return NULL; + + /* set ticks to timer */ + ret = set_cascade_timer(priv, ticks, allocated_timer->num); + if (ret < 0) + return NULL; + + return allocated_timer; +} + +static struct mpic_timer *get_timer(const struct timeval *time) +{ + struct timer_group_priv *priv; + struct mpic_timer *timer; + + u64 ticks; + unsigned int num; + unsigned int i; + unsigned long flags; + int ret; + + list_for_each_entry(priv, &timer_group_list, node) { + ret = convert_time_to_ticks(priv, time, &ticks); + if (ret < 0) + return NULL; + + if (ticks > MAX_TICKS) { + if (!(priv->flags & FSL_GLOBAL_TIMER)) + return NULL; + + timer = get_cascade_timer(priv, ticks); + if (!timer) + continue; + + return timer; + } + + for (i = 0; i < TIMERS_PER_GROUP; i++) { + /* one timer: Reverse allocation */ + num = TIMERS_PER_GROUP - 1 - i; + spin_lock_irqsave(&priv->lock, flags); + if (priv->idle & (1 << i)) { + /* set timer busy */ + priv->idle &= ~(1 << i); + /* set ticks & stop timer */ + out_be32(&priv->regs[num].gtbcr, + ticks | TIMER_STOP); + out_be32(&priv->regs[num].gtccr, 0); + priv->timer[num].cascade_handle = NULL; + spin_unlock_irqrestore(&priv->lock, flags); + return &priv->timer[num]; + } + spin_unlock_irqrestore(&priv->lock, flags); + } + } + + return NULL; +} + +/** + * mpic_start_timer - start hardware timer + * @handle: the timer to be started. + * + * It will do ->fn(->dev) callback from the hardware interrupt at + * the ->timeval point in the future. 
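set_cascade_timer() above splits one large tick count across two chained counters with div_u64_rem(): the quotient loads the upper timer and the remainder the lower one, and mpic_get_remain_time() later multiplies and adds to reverse the split. A user-space sketch of that split/recombine with 32-bit counters; the cascade constant mirrors MAX_TICKS_CASCADE, the sample value is arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MAX_TICKS_CASCADE 0xffffffffULL   /* capacity of one 32-bit counter */

/* Split a 64-bit tick count across an upper and a lower cascaded counter. */
static void cascade_split(uint64_t ticks, uint32_t *upper, uint32_t *lower)
{
	*upper = (uint32_t)(ticks / MAX_TICKS_CASCADE);   /* div_u64_rem() quotient */
	*lower = (uint32_t)(ticks % MAX_TICKS_CASCADE);   /* div_u64_rem() remainder */
}

/* Recombine the two counters, as mpic_get_remain_time() does. */
static uint64_t cascade_join(uint32_t upper, uint32_t lower)
{
	return (uint64_t)upper * MAX_TICKS_CASCADE + lower;
}

int main(void)
{
	uint64_t ticks = 0x123456789abULL;   /* too large for one counter */
	uint32_t up, lo;

	cascade_split(ticks, &up, &lo);
	printf("ticks %" PRIx64 " -> upper %08" PRIx32 " lower %08" PRIx32
	       " -> %" PRIx64 "\n", ticks, up, lo, cascade_join(up, lo));
	return 0;
}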
+ */ +void mpic_start_timer(struct mpic_timer *handle) +{ + struct timer_group_priv *priv = container_of(handle, + struct timer_group_priv, timer[handle->num]); + + clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP); +} +EXPORT_SYMBOL(mpic_start_timer); + +/** + * mpic_stop_timer - stop hardware timer + * @handle: the timer to be stoped + * + * The timer periodically generates an interrupt. Unless user stops the timer. + */ +void mpic_stop_timer(struct mpic_timer *handle) +{ + struct timer_group_priv *priv = container_of(handle, + struct timer_group_priv, timer[handle->num]); + struct cascade_priv *casc_priv; + + setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP); + + casc_priv = priv->timer[handle->num].cascade_handle; + if (casc_priv) { + out_be32(&priv->regs[handle->num].gtccr, 0); + out_be32(&priv->regs[handle->num - 1].gtccr, 0); + } else { + out_be32(&priv->regs[handle->num].gtccr, 0); + } +} +EXPORT_SYMBOL(mpic_stop_timer); + +/** + * mpic_get_remain_time - get timer time + * @handle: the timer to be selected. + * @time: time for timer + * + * Query timer remaining time. + */ +void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time) +{ + struct timer_group_priv *priv = container_of(handle, + struct timer_group_priv, timer[handle->num]); + struct cascade_priv *casc_priv; + + u64 ticks; + u32 tmp_ticks; + + casc_priv = priv->timer[handle->num].cascade_handle; + if (casc_priv) { + tmp_ticks = in_be32(&priv->regs[handle->num].gtccr); + ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE; + tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr); + ticks += tmp_ticks; + } else { + ticks = in_be32(&priv->regs[handle->num].gtccr); + } + + convert_ticks_to_time(priv, ticks, time); +} +EXPORT_SYMBOL(mpic_get_remain_time); + +/** + * mpic_free_timer - free hardware timer + * @handle: the timer to be removed. + * + * Free the timer. + * + * Note: can not be used in interrupt context. + */ +void mpic_free_timer(struct mpic_timer *handle) +{ + struct timer_group_priv *priv = container_of(handle, + struct timer_group_priv, timer[handle->num]); + + struct cascade_priv *casc_priv; + unsigned long flags; + + mpic_stop_timer(handle); + + casc_priv = priv->timer[handle->num].cascade_handle; + + free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev); + + spin_lock_irqsave(&priv->lock, flags); + if (casc_priv) { + u32 tcr; + tcr = casc_priv->tcr_value | (casc_priv->tcr_value << + MPIC_TIMER_TCR_ROVR_OFFSET); + clrbits32(priv->group_tcr, tcr); + priv->idle |= casc_priv->cascade_map; + priv->timer[handle->num].cascade_handle = NULL; + } else { + priv->idle |= TIMER_OFFSET(handle->num); + } + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL(mpic_free_timer); + +/** + * mpic_request_timer - get a hardware timer + * @fn: interrupt handler function + * @dev: callback function of the data + * @time: time for timer + * + * This executes the "request_irq", returning NULL + * else "handle" on success. 
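convert_time_to_ticks() and convert_ticks_to_time() near the top of mpic_timer.c scale a timeval by the timer frequency, handling the second, millisecond and microsecond parts separately and guarding against 64-bit overflow. A compact user-space version of the same conversion; the 12.5 MHz clock is an assumed example, not a value taken from the driver:

#include <stdio.h>
#include <stdint.h>

struct simple_tv { int64_t sec; int64_t usec; };

/* Convert seconds+microseconds to ticks at freq Hz; returns -1 on overflow. */
static int time_to_ticks(const struct simple_tv *tv, uint64_t freq, uint64_t *ticks)
{
	uint64_t max_sec = UINT64_MAX / freq;   /* overflow guard, as in the driver */

	if (tv->sec < 0 || tv->usec < 0 || tv->usec >= 1000000 ||
	    (uint64_t)tv->sec > max_sec)
		return -1;

	*ticks = (uint64_t)tv->sec * freq
	       + (uint64_t)(tv->usec / 1000) * freq / 1000       /* millisecond part */
	       + (uint64_t)(tv->usec % 1000) * freq / 1000000;   /* microsecond part */
	return 0;
}

/* Convert ticks back to seconds+microseconds. */
static void ticks_to_time(uint64_t ticks, uint64_t freq, struct simple_tv *tv)
{
	tv->sec = (int64_t)(ticks / freq);
	tv->usec = (int64_t)((ticks % freq) * 1000000 / freq);
}

int main(void)
{
	struct simple_tv in = { .sec = 3, .usec = 250000 }, out;
	uint64_t freq = 12500000;   /* assumed 12.5 MHz timer clock */
	uint64_t ticks;

	if (time_to_ticks(&in, freq, &ticks))
		return 1;
	ticks_to_time(ticks, freq, &out);
	printf("%lld.%06llds -> %llu ticks -> %lld.%06llds\n",
	       (long long)in.sec, (long long)in.usec, (unsigned long long)ticks,
	       (long long)out.sec, (long long)out.usec);
	return 0;
}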
+ */ +struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev, + const struct timeval *time) +{ + struct mpic_timer *allocated_timer; + int ret; + + if (list_empty(&timer_group_list)) + return NULL; + + if (!(time->tv_sec + time->tv_usec) || + time->tv_sec < 0 || time->tv_usec < 0) + return NULL; + + if (time->tv_usec > ONE_SECOND) + return NULL; + + allocated_timer = get_timer(time); + if (!allocated_timer) + return NULL; + + ret = request_irq(allocated_timer->irq, fn, + IRQF_TRIGGER_LOW, "global-timer", dev); + if (ret) { + mpic_free_timer(allocated_timer); + return NULL; + } + + allocated_timer->dev = dev; + + return allocated_timer; +} +EXPORT_SYMBOL(mpic_request_timer); + +static int timer_group_get_freq(struct device_node *np, + struct timer_group_priv *priv) +{ + u32 div; + + if (priv->flags & FSL_GLOBAL_TIMER) { + struct device_node *dn; + + dn = of_find_compatible_node(NULL, NULL, "fsl,mpic"); + if (dn) { + of_property_read_u32(dn, "clock-frequency", + &priv->timerfreq); + of_node_put(dn); + } + } + + if (priv->timerfreq <= 0) + return -EINVAL; + + if (priv->flags & FSL_GLOBAL_TIMER) { + div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8; + priv->timerfreq /= div; + } + + return 0; +} + +static int timer_group_get_irq(struct device_node *np, + struct timer_group_priv *priv) +{ + const u32 all_timer[] = { 0, TIMERS_PER_GROUP }; + const u32 *p; + u32 offset; + u32 count; + + unsigned int i; + unsigned int j; + unsigned int irq_index = 0; + unsigned int irq; + int len; + + p = of_get_property(np, "fsl,available-ranges", &len); + if (p && len % (2 * sizeof(u32)) != 0) { + pr_err("%s: malformed available-ranges property.\n", + np->full_name); + return -EINVAL; + } + + if (!p) { + p = all_timer; + len = sizeof(all_timer); + } + + len /= 2 * sizeof(u32); + + for (i = 0; i < len; i++) { + offset = p[i * 2]; + count = p[i * 2 + 1]; + for (j = 0; j < count; j++) { + irq = irq_of_parse_and_map(np, irq_index); + if (!irq) { + pr_err("%s: irq parse and map failed.\n", + np->full_name); + return -EINVAL; + } + + /* Set timer idle */ + priv->idle |= TIMER_OFFSET((offset + j)); + priv->timer[offset + j].irq = irq; + priv->timer[offset + j].num = offset + j; + irq_index++; + } + } + + return 0; +} + +static void timer_group_init(struct device_node *np) +{ + struct timer_group_priv *priv; + unsigned int i = 0; + int ret; + + priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL); + if (!priv) { + pr_err("%s: cannot allocate memory for group.\n", + np->full_name); + return; + } + + if (of_device_is_compatible(np, "fsl,mpic-global-timer")) + priv->flags |= FSL_GLOBAL_TIMER; + + priv->regs = of_iomap(np, i++); + if (!priv->regs) { + pr_err("%s: cannot ioremap timer register address.\n", + np->full_name); + goto out; + } + + if (priv->flags & FSL_GLOBAL_TIMER) { + priv->group_tcr = of_iomap(np, i++); + if (!priv->group_tcr) { + pr_err("%s: cannot ioremap tcr address.\n", + np->full_name); + goto out; + } + } + + ret = timer_group_get_freq(np, priv); + if (ret < 0) { + pr_err("%s: cannot get timer frequency.\n", np->full_name); + goto out; + } + + ret = timer_group_get_irq(np, priv); + if (ret < 0) { + pr_err("%s: cannot get timer irqs.\n", np->full_name); + goto out; + } + + spin_lock_init(&priv->lock); + + /* Init FSL timer hardware */ + if (priv->flags & FSL_GLOBAL_TIMER) + setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV); + + list_add_tail(&priv->node, &timer_group_list); + + return; + +out: + if (priv->regs) + iounmap(priv->regs); + + if (priv->group_tcr) + iounmap(priv->group_tcr); + + 
kfree(priv); +} + +static void mpic_timer_resume(void) +{ + struct timer_group_priv *priv; + + list_for_each_entry(priv, &timer_group_list, node) { + /* Init FSL timer hardware */ + if (priv->flags & FSL_GLOBAL_TIMER) + setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV); + } +} + +static const struct of_device_id mpic_timer_ids[] = { + { .compatible = "fsl,mpic-global-timer", }, + {}, +}; + +static struct syscore_ops mpic_timer_syscore_ops = { + .resume = mpic_timer_resume, +}; + +static int __init mpic_timer_init(void) +{ + struct device_node *np = NULL; + + for_each_matching_node(np, mpic_timer_ids) + timer_group_init(np); + + register_syscore_ops(&mpic_timer_syscore_ops); + + if (list_empty(&timer_group_list)) + return -ENODEV; + + return 0; +} +subsys_initcall(mpic_timer_init); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3238d4004e84..e87ecaa2c569 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -274,6 +274,14 @@ struct kvm_arch{ int css_support; }; +#define KVM_HVA_ERR_BAD (-1UL) +#define KVM_HVA_ERR_RO_BAD (-2UL) + +static inline bool kvm_is_error_hva(unsigned long addr) +{ + return IS_ERR_VALUE(addr); +} + extern int sie64a(struct kvm_s390_sie_block *, u64 *); extern char sie_exit; #endif diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 6340178748bf..ff132ac64ddd 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -12,8 +12,6 @@ typedef struct { unsigned long asce_bits; unsigned long asce_limit; unsigned long vdso_base; - /* Cloned contexts will be created with extended page tables. */ - unsigned int alloc_pgste:1; /* The mmu context has extended page tables. */ unsigned int has_pgste:1; } mm_context_t; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 084e7755ed9b..4fb67a0e4ddf 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -21,24 +21,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; #endif - if (current->mm && current->mm->context.alloc_pgste) { - /* - * alloc_pgste indicates, that any NEW context will be created - * with extended page tables. The old context is unchanged. The - * page table allocation and the page table operations will - * look at has_pgste to distinguish normal and extended page - * tables. The only way to create extended page tables is to - * set alloc_pgste and then create a new context (e.g. dup_mm). - * The page table allocation is called after init_new_context - * and if has_pgste is set, it will create extended page - * tables. - */ - mm->context.has_pgste = 1; - mm->context.alloc_pgste = 1; - } else { - mm->context.has_pgste = 0; - mm->context.alloc_pgste = 0; - } + mm->context.has_pgste = 0; mm->context.asce_limit = STACK_TOP_MAX; crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 0ea4e591fa78..7a60bb93e83c 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1361,13 +1361,25 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ +static inline void pmdp_flush_lazy(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + int active = (mm == current->active_mm) ? 
1 : 0; + + if ((atomic_read(&mm->context.attach_count) & 0xffff) > active) + __pmd_idte(address, pmdp); + else + mm->context.flush_mm = 1; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); #define __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); static inline int pmd_trans_splitting(pmd_t pmd) { diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 6b499870662f..83c85c217f5c 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -43,6 +43,7 @@ extern void execve_tail(void); #ifndef CONFIG_64BIT #define TASK_SIZE (1UL << 31) +#define TASK_MAX_SIZE (1UL << 31) #define TASK_UNMAPPED_BASE (1UL << 30) #else /* CONFIG_64BIT */ @@ -51,6 +52,7 @@ extern void execve_tail(void); #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ (1UL << 30) : (1UL << 41)) #define TASK_SIZE TASK_SIZE_OF(current) +#define TASK_MAX_SIZE (1UL << 53) #endif /* CONFIG_64BIT */ diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 2dacb306835c..92494494692e 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -80,4 +80,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0a49095104c9..497451ec5e26 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -719,10 +719,6 @@ static void reserve_oldmem(void) } create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE); create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE); - if (OLDMEM_BASE + OLDMEM_SIZE == real_size) - saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; - else - saved_max_pfn = PFN_DOWN(real_size) - 1; #endif } diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 3074475c8ae0..3a74d8af0d69 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -119,12 +119,21 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) * The layout is as follows: * - gpr 2 contains the subchannel id (passed as addr) * - gpr 3 contains the virtqueue index (passed as datamatch) + * - gpr 4 contains the index on the bus (optionally) */ - ret = kvm_io_bus_write(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, - vcpu->run->s.regs.gprs[2], - 8, &vcpu->run->s.regs.gprs[3]); + ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, + vcpu->run->s.regs.gprs[2], + 8, &vcpu->run->s.regs.gprs[3], + vcpu->run->s.regs.gprs[4]); srcu_read_unlock(&vcpu->kvm->srcu, idx); - /* kvm_io_bus_write returns -EOPNOTSUPP if it found no match. */ + + /* + * Return cookie in gpr 2, but don't overwrite the register if the + * diagnose will be handled by userspace. + */ + if (ret != -EOPNOTSUPP) + vcpu->run->s.regs.gprs[2] = ret; + /* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */ return ret < 0 ? 
ret : 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba694d2ba51e..ac8e6670c551 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -28,6 +28,7 @@ #include <asm/pgtable.h> #include <asm/nmi.h> #include <asm/switch_to.h> +#include <asm/facility.h> #include <asm/sclp.h> #include "kvm-s390.h" #include "gaccess.h" @@ -84,9 +85,15 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; -static unsigned long long *facilities; +unsigned long *vfacilities; static struct gmap_notifier gmap_notifier; +/* test availability of vfacility */ +static inline int test_vfacility(unsigned long nr) +{ + return __test_facility(nr, (void *) vfacilities); +} + /* Section: not file related */ int kvm_arch_hardware_enable(void *garbage) { @@ -387,7 +394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->ecb = 6; vcpu->arch.sie_block->ecb2 = 8; vcpu->arch.sie_block->eca = 0xC1002001U; - vcpu->arch.sie_block->fac = (int) (long) facilities; + vcpu->arch.sie_block->fac = (int) (long) vfacilities; hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, (unsigned long) vcpu); @@ -1056,6 +1063,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) return 0; } +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +} + /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, @@ -1122,20 +1133,20 @@ static int __init kvm_s390_init(void) * to hold the maximum amount of facilities. On the other hand, we * only set facilities that are known to work in KVM. */ - facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); - if (!facilities) { + vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); + if (!vfacilities) { kvm_exit(); return -ENOMEM; } - memcpy(facilities, S390_lowcore.stfle_fac_list, 16); - facilities[0] &= 0xff82fff3f47c0000ULL; - facilities[1] &= 0x001c000000000000ULL; + memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); + vfacilities[0] &= 0xff82fff3f47c0000UL; + vfacilities[1] &= 0x001c000000000000UL; return 0; } static void __exit kvm_s390_exit(void) { - free_page((unsigned long) facilities); + free_page((unsigned long) vfacilities); kvm_exit(); } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 028ca9fd2158..dc99f1ca4267 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -24,6 +24,9 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); +/* declare vfacilities extern */ +extern unsigned long *vfacilities; + /* negativ values are error codes, positive values for internal conditions */ #define SIE_INTERCEPT_RERUNVCPU (1<<0) #define SIE_INTERCEPT_UCONTROL (1<<1) @@ -112,6 +115,13 @@ static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) return (base2 ? 
vcpu->run->s.regs.gprs[base2] : 0) + disp2; } +/* Set the condition code in the guest program status word */ +static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) +{ + vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44); + vcpu->arch.sie_block->gpsw.mask |= cc << 44; +} + int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); void kvm_s390_tasklet(unsigned long parm); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 0da3e6eb6be6..8f8d8ee9b1fb 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -163,8 +163,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu) kfree(inti); no_interrupt: /* Set condition code and we're done. */ - vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); - vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; + kvm_s390_set_psw_cc(vcpu, cc); return 0; } @@ -219,15 +218,13 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) * Set condition code 3 to stop the guest from issueing channel * I/O instructions. */ - vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); - vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; + kvm_s390_set_psw_cc(vcpu, 3); return 0; } } static int handle_stfl(struct kvm_vcpu *vcpu) { - unsigned int facility_list; int rc; vcpu->stat.instruction_stfl++; @@ -235,15 +232,13 @@ static int handle_stfl(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - /* only pass the facility bits, which we can handle */ - facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3; - rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), - &facility_list, sizeof(facility_list)); + vfacilities, 4); if (rc) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list); - trace_kvm_s390_handle_stfl(vcpu, facility_list); + VCPU_EVENT(vcpu, 5, "store facility list value %x", + *(unsigned int *) vfacilities); + trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); return 0; } @@ -386,7 +381,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (fc > 3) { - vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; /* cc 3 */ + kvm_s390_set_psw_cc(vcpu, 3); return 0; } @@ -396,7 +391,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) if (fc == 0) { vcpu->run->s.regs.gprs[0] = 3 << 28; - vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); /* cc 0 */ + kvm_s390_set_psw_cc(vcpu, 0); return 0; } @@ -430,12 +425,11 @@ static int handle_stsi(struct kvm_vcpu *vcpu) } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); free_page(mem); - vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); + kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; return 0; out_no_data: - /* condition code 3 */ - vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; + kvm_s390_set_psw_cc(vcpu, 3); out_exception: free_page(mem); return rc; @@ -493,12 +487,12 @@ static int handle_epsw(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, ®1, ®2); /* This basically extracts the mask half of the psw. 
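kvm_s390_set_psw_cc() above replaces the open-coded updates of the guest condition code, a 2-bit field at bit 44 of the PSW mask: clear the field, then OR in the new value. The same read-modify-write pattern in a stand-alone form; the sample mask value is made up:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define CC_SHIFT 44                        /* condition code lives at bits 44-45 */
#define CC_MASK  (3ULL << CC_SHIFT)

/* Set the 2-bit condition-code field inside a 64-bit PSW-style mask. */
static uint64_t set_cc(uint64_t mask, unsigned int cc)
{
	mask &= ~CC_MASK;                          /* clear the old value */
	mask |= (uint64_t)(cc & 3) << CC_SHIFT;    /* insert the new one */
	return mask;
}

static unsigned int get_cc(uint64_t mask)
{
	return (unsigned int)((mask & CC_MASK) >> CC_SHIFT);
}

int main(void)
{
	uint64_t mask = 0x0705000180000000ULL;     /* made-up PSW mask value */

	for (unsigned int cc = 0; cc <= 3; cc++) {
		mask = set_cc(mask, cc);
		printf("cc=%u mask=%016" PRIx64 "\n", get_cc(mask), mask);
	}
	return 0;
}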
*/ - vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000; + vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; if (reg2) { - vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000; + vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; vcpu->run->s.regs.gprs[reg2] |= - vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff; + vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; } return 0; } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 89ebae4008f2..ce36ea80e4f9 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -135,30 +135,17 @@ void __init paging_init(void) void __init mem_init(void) { - unsigned long codesize, reservedpages, datasize, initsize; - - max_mapnr = num_physpages = max_low_pfn; + max_mapnr = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); /* Setup guest page hinting */ cmma_init(); /* this will put all low memory onto the freelists */ - totalram_pages += free_all_bootmem(); + free_all_bootmem(); setup_zero_pages(); /* Setup zeroed pages. */ - reservedpages = 0; - - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - max_mapnr << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >>10, - initsize >> 10); + mem_init_print_info(NULL); printk("Write protected kernel read-only data: %#lx - %#lx\n", (unsigned long)&_stext, PFN_ALIGN((unsigned long)&_eshared) - 1); @@ -166,13 +153,14 @@ void __init mem_init(void) void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(POISON_FREE_INITMEM); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 06bafec00278..40023290ee5b 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } @@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = s390_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = s390_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 17bf4d3d303a..967d0bf1c059 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -335,7 +335,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, if ((from | to | len) & (PMD_SIZE - 1)) return -EINVAL; - if (len == 0 || from + len > PGDIR_SIZE || + if (len == 0 || from + len > TASK_MAX_SIZE || from + len < from || to + len < to) return -EINVAL; @@ -731,6 +731,11 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte) 
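gmap_map_segment() in the hunk above rejects ranges where "from + len < from" or "to + len < to", the usual way to catch unsigned wrap-around without widening the type, alongside the alignment and limit checks. A tiny sketch of that validation; the limit and alignment constants stand in for TASK_MAX_SIZE and PMD_SIZE and are not the real values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define LIMIT  (1ULL << 53)    /* stands in for TASK_MAX_SIZE */
#define SEG_SZ (1ULL << 20)    /* stands in for PMD_SIZE alignment */

/* Validate an offset/length pair the way gmap_map_segment() does. */
static bool range_ok(uint64_t from, uint64_t to, uint64_t len)
{
	if ((from | to | len) & (SEG_SZ - 1))     /* must be segment aligned */
		return false;
	if (len == 0 || from + len > LIMIT)       /* empty or past the limit */
		return false;
	if (from + len < from || to + len < to)   /* unsigned addition wrapped */
		return false;
	return true;
}

int main(void)
{
	printf("normal range  : %d\n", range_ok(SEG_SZ, 2 * SEG_SZ, 4 * SEG_SZ));
	printf("wrapping range: %d\n",
	       range_ok(UINT64_MAX - SEG_SZ + 1, 0, 2 * SEG_SZ));
	return 0;
}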
spin_unlock(&gmap_notifier_lock); } +static inline int page_table_with_pgste(struct page *page) +{ + return atomic_read(&page->_mapcount) == 0; +} + static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, unsigned long vmaddr) { @@ -750,7 +755,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, mp->vmaddr = vmaddr & PMD_MASK; INIT_LIST_HEAD(&mp->mapper); page->index = (unsigned long) mp; - atomic_set(&page->_mapcount, 3); + atomic_set(&page->_mapcount, 0); table = (unsigned long *) page_to_phys(page); clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); @@ -821,6 +826,11 @@ EXPORT_SYMBOL(set_guest_storage_key); #else /* CONFIG_PGSTE */ +static inline int page_table_with_pgste(struct page *page) +{ + return 0; +} + static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, unsigned long vmaddr) { @@ -897,12 +907,12 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) struct page *page; unsigned int bit, mask; - if (mm_has_pgste(mm)) { + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + if (page_table_with_pgste(page)) { gmap_disconnect_pgtable(mm, table); return page_table_free_pgste(table); } /* Free 1K/2K page table fragment of a 4K page */ - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); spin_lock_bh(&mm->context.list_lock); if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) @@ -940,14 +950,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) unsigned int bit, mask; mm = tlb->mm; - if (mm_has_pgste(mm)) { + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + if (page_table_with_pgste(page)) { gmap_disconnect_pgtable(mm, table); table = (unsigned long *) (__pa(table) | FRAG_MASK); tlb_remove_table(tlb, table); return; } bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); spin_lock_bh(&mm->context.list_lock); if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) list_del(&page->lru); @@ -1033,36 +1043,120 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -void thp_split_vma(struct vm_area_struct *vma) +static inline void thp_split_vma(struct vm_area_struct *vma) { unsigned long addr; - struct page *page; - for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { - page = follow_page(vma, addr, FOLL_SPLIT); - } + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) + follow_page(vma, addr, FOLL_SPLIT); } -void thp_split_mm(struct mm_struct *mm) +static inline void thp_split_mm(struct mm_struct *mm) { - struct vm_area_struct *vma = mm->mmap; + struct vm_area_struct *vma; - while (vma != NULL) { + for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { thp_split_vma(vma); vma->vm_flags &= ~VM_HUGEPAGE; vma->vm_flags |= VM_NOHUGEPAGE; - vma = vma->vm_next; } + mm->def_flags |= VM_NOHUGEPAGE; +} +#else +static inline void thp_split_mm(struct mm_struct *mm) +{ } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb, + struct mm_struct *mm, pud_t *pud, + unsigned long addr, unsigned long end) +{ + unsigned long next, *table, *new; + struct page *page; + pmd_t *pmd; + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); +again: + if (pmd_none_or_clear_bad(pmd)) + continue; + table = (unsigned long *) pmd_deref(*pmd); + page = pfn_to_page(__pa(table) >> 
PAGE_SHIFT); + if (page_table_with_pgste(page)) + continue; + /* Allocate new page table with pgstes */ + new = page_table_alloc_pgste(mm, addr); + if (!new) { + mm->context.has_pgste = 0; + continue; + } + spin_lock(&mm->page_table_lock); + if (likely((unsigned long *) pmd_deref(*pmd) == table)) { + /* Nuke pmd entry pointing to the "short" page table */ + pmdp_flush_lazy(mm, addr, pmd); + pmd_clear(pmd); + /* Copy ptes from old table to new table */ + memcpy(new, table, PAGE_SIZE/2); + clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); + /* Establish new table */ + pmd_populate(mm, pmd, (pte_t *) new); + /* Free old table with rcu, there might be a walker! */ + page_table_free_rcu(tlb, table); + new = NULL; + } + spin_unlock(&mm->page_table_lock); + if (new) { + page_table_free_pgste(new); + goto again; + } + } while (pmd++, addr = next, addr != end); + + return addr; +} + +static unsigned long page_table_realloc_pud(struct mmu_gather *tlb, + struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end) +{ + unsigned long next; + pud_t *pud; + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + next = page_table_realloc_pmd(tlb, mm, pud, addr, next); + } while (pud++, addr = next, addr != end); + + return addr; +} + +static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long addr, unsigned long end) +{ + unsigned long next; + pgd_t *pgd; + + pgd = pgd_offset(mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + next = page_table_realloc_pud(tlb, mm, pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} + /* * switch on pgstes for its userspace process (for kvm) */ int s390_enable_sie(void) { struct task_struct *tsk = current; - struct mm_struct *mm, *old_mm; + struct mm_struct *mm = tsk->mm; + struct mmu_gather tlb; /* Do we have switched amode? If no, we cannot do sie */ if (s390_user_mode == HOME_SPACE_MODE) @@ -1072,57 +1166,16 @@ int s390_enable_sie(void) if (mm_has_pgste(tsk->mm)) return 0; - /* lets check if we are allowed to replace the mm */ - task_lock(tsk); - if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || -#ifdef CONFIG_AIO - !hlist_empty(&tsk->mm->ioctx_list) || -#endif - tsk->mm != tsk->active_mm) { - task_unlock(tsk); - return -EINVAL; - } - task_unlock(tsk); - - /* we copy the mm and let dup_mm create the page tables with_pgstes */ - tsk->mm->context.alloc_pgste = 1; - /* make sure that both mms have a correct rss state */ - sync_mm_rss(tsk->mm); - mm = dup_mm(tsk); - tsk->mm->context.alloc_pgste = 0; - if (!mm) - return -ENOMEM; - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE + down_write(&mm->mmap_sem); /* split thp mappings and disable thp for future mappings */ thp_split_mm(mm); - mm->def_flags |= VM_NOHUGEPAGE; -#endif - - /* Now lets check again if something happened */ - task_lock(tsk); - if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || -#ifdef CONFIG_AIO - !hlist_empty(&tsk->mm->ioctx_list) || -#endif - tsk->mm != tsk->active_mm) { - mmput(mm); - task_unlock(tsk); - return -EINVAL; - } - - /* ok, we are alone. No ptrace, no threads, etc. 
*/ - old_mm = tsk->mm; - tsk->mm = tsk->active_mm = mm; - preempt_disable(); - update_mm(mm, tsk); - atomic_inc(&mm->context.attach_count); - atomic_dec(&old_mm->context.attach_count); - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); - preempt_enable(); - task_unlock(tsk); - mmput(old_mm); - return 0; + /* Reallocate the page tables with pgstes */ + mm->context.has_pgste = 1; + tlb_gather_mmu(&tlb, mm, 0); + page_table_realloc(&tlb, mm, 0, TASK_SIZE); + tlb_finish_mmu(&tlb, 0, -1); + up_write(&mm->mmap_sem); + return mm->context.has_pgste ? 0 : -ENOMEM; } EXPORT_SYMBOL_GPL(s390_enable_sie); @@ -1165,7 +1218,8 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, } } -void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable) +void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable) { struct list_head *lh = (struct list_head *) pgtable; @@ -1179,7 +1233,7 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable) mm->pmd_huge_pte = pgtable; } -pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm) +pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { struct list_head *lh; pgtable_t pgtable; diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S index eebcbaa4e978..7274b5c4287e 100644 --- a/arch/score/kernel/vmlinux.lds.S +++ b/arch/score/kernel/vmlinux.lds.S @@ -49,6 +49,7 @@ SECTIONS } . = ALIGN(16); + _sdata = .; /* Start of data section */ RODATA EXCEPTION_TABLE(16) diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 47b600e4b2c5..6b18fb0189ae 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c @@ -172,10 +172,10 @@ out_of_memory: down_read(&mm->mmap_sem); goto survive; } - printk("VM: killing process %s\n", tsk->comm); - if (user_mode(regs)) - do_group_exit(SIGKILL); - goto no_context; + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; do_sigbus: up_read(&mm->mmap_sem); diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index 0940682ab38b..9fbce49ad3bd 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c @@ -75,40 +75,19 @@ void __init paging_init(void) void __init mem_init(void) { - unsigned long codesize, reservedpages, datasize, initsize; - unsigned long tmp, ram = 0; - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - totalram_pages += free_all_bootmem(); + free_all_bootmem(); setup_zero_page(); /* Setup zeroed pages. 
*/ - reservedpages = 0; - - for (tmp = 0; tmp < max_low_pfn; tmp++) - if (page_is_ram(tmp)) { - ram++; - if (PageReserved(pfn_to_page(tmp))) - reservedpages++; - } - - num_physpages = ram; - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " - "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - ram << (PAGE_SHIFT-10), codesize >> 10, - reservedpages << (PAGE_SHIFT-10), datasize >> 10, - initsize >> 10, - totalhigh_pages << (PAGE_SHIFT-10)); + + mem_init_print_info(NULL); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c index d71a0bcf8145..4d94dff9015c 100644 --- a/arch/sh/boards/board-espt.c +++ b/arch/sh/boards/board-espt.c @@ -85,7 +85,7 @@ static struct sh_eth_plat_data sh7763_eth_pdata = { }; static struct platform_device espt_eth_device = { - .name = "sh-eth", + .name = "sh7763-gether", .resource = sh_eth_resources, .num_resources = ARRAY_SIZE(sh_eth_resources), .dev = { diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 41f86702eb9f..4f114d1cd019 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c @@ -82,7 +82,7 @@ static struct sh_eth_plat_data sh7757_eth0_pdata = { }; static struct platform_device sh7757_eth0_device = { - .name = "sh-eth", + .name = "sh7757-ether", .resource = sh_eth0_resources, .id = 0, .num_resources = ARRAY_SIZE(sh_eth0_resources), @@ -111,7 +111,7 @@ static struct sh_eth_plat_data sh7757_eth1_pdata = { }; static struct platform_device sh7757_eth1_device = { - .name = "sh-eth", + .name = "sh7757-ether", .resource = sh_eth1_resources, .id = 1, .num_resources = ARRAY_SIZE(sh_eth1_resources), @@ -157,7 +157,7 @@ static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { }; static struct platform_device sh7757_eth_giga0_device = { - .name = "sh-eth", + .name = "sh7757-gether", .resource = sh_eth_giga0_resources, .id = 2, .num_resources = ARRAY_SIZE(sh_eth_giga0_resources), @@ -192,7 +192,7 @@ static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { }; static struct platform_device sh7757_eth_giga1_device = { - .name = "sh-eth", + .name = "sh7757-gether", .resource = sh_eth_giga1_resources, .id = 3, .num_resources = ARRAY_SIZE(sh_eth_giga1_resources), diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 764530c85aa9..61fade0ffa96 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -165,8 +165,8 @@ static struct sh_eth_plat_data sh_eth_plat = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", - .id = 0, + .name = "sh7724-ether", + .id = 0, .dev = { .platform_data = &sh_eth_plat, }, diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c index 9759d6ba7ffb..658326f44df8 100644 --- a/arch/sh/boards/mach-se/770x/setup.c +++ b/arch/sh/boards/mach-se/770x/setup.c @@ -128,8 +128,8 @@ static struct resource sh_eth0_resources[] = { }; static struct platform_device sh_eth0_device = { - .name 
= "sh-eth", - .id = 0, + .name = "sh771x-ether", + .id = 0, .dev = { .platform_data = PHY_ID, }, @@ -151,8 +151,8 @@ static struct resource sh_eth1_resources[] = { }; static struct platform_device sh_eth1_device = { - .name = "sh-eth", - .id = 1, + .name = "sh771x-ether", + .id = 1, .dev = { .platform_data = PHY_ID, }, diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 4010e63e82d8..b70180ef3e29 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -380,8 +380,8 @@ static struct sh_eth_plat_data sh_eth_plat = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", - .id = 0, + .name = "sh7724-ether", + .id = 0, .dev = { .platform_data = &sh_eth_plat, }, diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index b7c75298dfb5..50ba481fa240 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c @@ -93,7 +93,7 @@ static struct sh_eth_plat_data sh7763_eth_pdata = { }; static struct platform_device sh7763rdp_eth_device = { - .name = "sh-eth", + .name = "sh7763-gether", .resource = sh_eth_resources, .num_resources = ARRAY_SIZE(sh_eth_resources), .dev = { diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index e0b740c831c7..bb11e1925178 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -124,8 +124,8 @@ static struct resource eth_resources[] = { }; static struct platform_device eth_device = { - .name = "sh-eth", - .id = -1, + .name = "sh7619-ether", + .id = -1, .dev = { .platform_data = (void *)1, }, diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 5f30f805d2f2..0128af3399b7 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -329,7 +329,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]), CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]), CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]), - CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[HWBLK_ETHER]), + CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]), CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]), CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c index deb683abacf0..ed9501519ab3 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c @@ -238,7 +238,7 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("adc0", &mstp_clks[MSTP313]), CLKDEV_CON_ID("mtu0", &mstp_clks[MSTP312]), CLKDEV_CON_ID("iebus0", &mstp_clks[MSTP304]), - CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[MSTP114]), + CLKDEV_DEV_ID("sh7734-gether.0", &mstp_clks[MSTP114]), CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP303]), CLKDEV_CON_ID("hif0", &mstp_clks[MSTP302]), CLKDEV_CON_ID("stif0", &mstp_clks[MSTP301]), diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 81f999a672f6..668c81631c08 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -117,11 +117,7 @@ void user_enable_single_step(struct task_struct *child) set_tsk_thread_flag(child, TIF_SINGLESTEP); - if (ptrace_get_breakpoints(child) < 0) - return; - set_single_step(child, pc); - ptrace_put_breakpoints(child); } void user_disable_single_step(struct task_struct *child) diff --git a/arch/sh/mm/init.c 
b/arch/sh/mm/init.c index 20f9ead650d3..33890fd267cb 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -407,30 +407,16 @@ unsigned int mem_init_done = 0; void __init mem_init(void) { - int codesize, datasize, initsize; - int nid; + pg_data_t *pgdat; iommu_init(); - num_physpages = 0; high_memory = NULL; + for_each_online_pgdat(pgdat) + high_memory = max_t(void *, high_memory, + __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); - for_each_online_node(nid) { - pg_data_t *pgdat = NODE_DATA(nid); - void *node_high_memory; - - num_physpages += pgdat->node_present_pages; - - if (pgdat->node_spanned_pages) - totalram_pages += free_all_bootmem_node(pgdat); - - - node_high_memory = (void *)__va((pgdat->node_start_pfn + - pgdat->node_spanned_pages) << - PAGE_SHIFT); - if (node_high_memory > high_memory) - high_memory = node_high_memory; - } + free_all_bootmem(); /* Set this up early, so we can take care of the zero page */ cpu_cache_init(); @@ -441,19 +427,8 @@ void __init mem_init(void) vsyscall_init(); - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " - "%dk data, %dk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - datasize >> 10, - initsize >> 10); - - printk(KERN_INFO "virtual kernel memory layout:\n" + mem_init_print_info(NULL); + pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" @@ -499,13 +474,13 @@ void __init mem_init(void) void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index b836e9297f2a..c2f6ff6d7a35 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -108,7 +108,7 @@ static inline int sparc_leon3_snooping_enabled(void) { u32 cctrl; __asm__ __volatile__("lda [%%g0] 2, %0\n\t" : "=r"(cctrl)); - return (cctrl >> 23) & 1; + return ((cctrl >> 23) & 1) && ((cctrl >> 17) & 1); }; static inline void sparc_leon3_disable_cache(void) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 79c214efa3fe..36760317814f 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -853,10 +853,11 @@ extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd); #define __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); #define __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif /* Encode and de-code a swap entry */ diff --git a/arch/sparc/include/uapi/asm/fcntl.h b/arch/sparc/include/uapi/asm/fcntl.h index d73e5e008b0d..7e8ace5bf760 100644 --- a/arch/sparc/include/uapi/asm/fcntl.h +++ b/arch/sparc/include/uapi/asm/fcntl.h @@ -35,7 +35,7 @@ #define O_SYNC (__O_SYNC|O_DSYNC) #define O_PATH 0x1000000 -#define 
O_TMPFILE 0x2000000 +#define __O_TMPFILE 0x2000000 #define F_GETOWN 5 /* for sockets. */ #define F_SETOWN 6 /* for sockets. */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 89f49b68a21c..4e1d66c3ce71 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -70,6 +70,8 @@ #define SO_SELECT_ERR_QUEUE 0x0029 +#define SO_BUSY_POLL 0x0030 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 961b87f99e69..f76389a32342 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -49,6 +49,8 @@ int foo(void) DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread)); BLANK(); DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */ return 0; diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 5ef48dab5636..11d460f6f9cc 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value) char *base, *p; int msg_len, loops; + if (strlen(var) + strlen(value) + 2 > + sizeof(pkt) - sizeof(pkt.header)) { + printk(KERN_ERR PFX + "contents length: %zu, which more than max: %lu," + "so could not set (%s) variable to (%s).\n", + strlen(var) + strlen(value) + 2, + sizeof(pkt) - sizeof(pkt.header), var, value); + return; + } + memset(&pkt, 0, sizeof(pkt)); pkt.header.data.tag.type = DS_DATA; pkt.header.data.handle = cp->handle; diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 6cfc1b09ec25..d7aa524b7283 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -254,15 +254,12 @@ void __init leon_smp_done(void) /* Free unneeded trap tables */ if (!cpu_present(1)) { free_reserved_page(virt_to_page(&trapbase_cpu1)); - num_physpages++; } if (!cpu_present(2)) { free_reserved_page(virt_to_page(&trapbase_cpu2)); - num_physpages++; } if (!cpu_present(3)) { free_reserved_page(virt_to_page(&trapbase_cpu3)); - num_physpages++; } /* Ok, they are spinning and ready to go. 
*/ smp_processors_ready = 1; diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 2031c65fd4ea..bc4d3f5d2e5d 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -254,7 +254,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, const char *type; u32 class; - dev = alloc_pci_dev(); + dev = pci_alloc_dev(bus); if (!dev) return NULL; @@ -281,7 +281,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, printk(" create device, devfn: %x, type: %s\n", devfn, type); - dev->bus = bus; dev->sysdata = node; dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; @@ -327,7 +326,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) pci_set_master(dev); - dev->current_state = 4; /* unknown power state */ + dev->current_state = PCI_UNKNOWN; /* unknown power state */ dev->error_state = pci_channel_io_normal; dev->dma_mask = 0xffffffff; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 2daaaa6eda23..51561b8b15ba 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -290,7 +290,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { /* We know it's 32-bit */ unsigned long task_size = STACK_TOP32; @@ -302,7 +301,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S index 44aad32eeb4e..969f96450f69 100644 --- a/arch/sparc/mm/hypersparc.S +++ b/arch/sparc/mm/hypersparc.S @@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out: /* The things we do for performance... */ hypersparc_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 @@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out: */ /* Verified, my ass... 
*/ hypersparc_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 @@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index af472cf7c69a..db6987082805 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -288,10 +288,6 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) void __init mem_init(void) { - int codepages = 0; - int datapages = 0; - int initpages = 0; - int reservedpages = 0; int i; if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { @@ -323,15 +319,12 @@ void __init mem_init(void) max_mapnr = last_valid_pfn - pfn_base; high_memory = __va(max_low_pfn << PAGE_SHIFT); - - totalram_pages = free_all_bootmem(); + free_all_bootmem(); for (i = 0; sp_banks[i].num_bytes != 0; i++) { unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; - num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT; - if (end_pfn <= highstart_pfn) continue; @@ -341,39 +334,19 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); - codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; - datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); - datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; - initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); - initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; - - /* Ignore memory holes for the purpose of counting reserved pages */ - for (i=0; i < max_low_pfn; i++) - if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap) - && PageReserved(pfn_to_page(i))) - reservedpages++; - - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT - 10), - codepages << (PAGE_SHIFT-10), - reservedpages << (PAGE_SHIFT - 10), - datapages << (PAGE_SHIFT-10), - initpages << (PAGE_SHIFT-10), - totalhigh_pages << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); } void free_initmem (void) { - num_physpages += free_initmem_default(POISON_FREE_INITMEM); + free_initmem_default(POISON_FREE_INITMEM); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM, - "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 04fd55a6e461..a9c42a7ffb6a 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2045,7 +2045,6 @@ static void __init register_page_bootmem_info(void) } void __init mem_init(void) { - unsigned long codepages, datapages, initpages; unsigned long addr, last; addr = PAGE_OFFSET + kern_base; @@ -2061,12 +2060,7 @@ void 
__init mem_init(void) high_memory = __va(last_valid_pfn << PAGE_SHIFT); register_page_bootmem_info(); - totalram_pages = free_all_bootmem(); - - /* We subtract one to account for the mem_map_zero page - * allocated below. - */ - num_physpages = totalram_pages - 1; + free_all_bootmem(); /* * Set up the zero page, mark it reserved, so that page count @@ -2079,19 +2073,7 @@ void __init mem_init(void) } mark_page_reserved(mem_map_zero); - codepages = (((unsigned long) _etext) - ((unsigned long) _start)); - codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; - datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); - datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; - initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); - initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; - - printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", - nr_free_pages() << (PAGE_SHIFT-10), - codepages << (PAGE_SHIFT-10), - datapages << (PAGE_SHIFT-10), - initpages << (PAGE_SHIFT-10), - PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); + mem_init_print_info(NULL); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); @@ -2131,8 +2113,8 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM, - "initrd"); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S index c801c3953a00..5d2b88d39424 100644 --- a/arch/sparc/mm/swift.S +++ b/arch/sparc/mm/swift.S @@ -105,7 +105,7 @@ swift_flush_cache_mm_out: .globl swift_flush_cache_range swift_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 @@ -116,7 +116,7 @@ swift_flush_cache_range: .globl swift_flush_cache_page swift_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -219,7 +219,7 @@ swift_flush_sig_insns: .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -233,7 +233,7 @@ swift_flush_tlb_all_out: .globl swift_flush_tlb_page swift_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 37e7bc4c95b3..7a91f288c708 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -188,7 +188,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, } } -void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable) +void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable) { struct list_head *lh = (struct list_head *) pgtable; @@ -202,7 +203,7 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable) mm->pmd_huge_pte = pgtable; } -pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm) +pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { struct list_head *lh; pgtable_t pgtable; diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S index 4e55e8f76648..bf10a345fa8b 100644 --- a/arch/sparc/mm/tsunami.S +++ 
b/arch/sparc/mm/tsunami.S @@ -24,7 +24,7 @@ /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -46,7 +46,7 @@ tsunami_flush_sig_insns: /* More slick stuff... */ tsunami_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -65,7 +65,7 @@ tsunami_flush_tlb_out: /* This one can be done in a fine grained manner... */ tsunami_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index bf8ee0613ae7..852257fcc82b 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -108,7 +108,7 @@ viking_mxcc_flush_page: viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP @@ -148,7 +148,7 @@ viking_flush_tlb_mm: #endif viking_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -173,7 +173,7 @@ viking_flush_tlb_range: #endif viking_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range: tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 @@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page: tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index d36a85ebb5e0..9c7be59e6f5a 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -785,9 +785,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; break; } if (proglen == oldproglen) { - image = module_alloc(max_t(unsigned int, - proglen, - sizeof(struct work_struct))); + image = module_alloc(proglen); if (!image) goto out; } @@ -806,20 +804,8 @@ out: return; } -static void jit_free_defer(struct work_struct *arg) -{ - module_free(NULL, arg); -} - -/* run from softirq, we must use a work_struct to call - * module_free() from process context - */ void bpf_jit_free(struct sk_filter *fp) { - if (fp->bpf_func != sk_run_filter) { - struct work_struct *work = (struct work_struct *)fp->bpf_func; - - INIT_WORK(work, jit_free_defer); - schedule_work(work); - } + if (fp->bpf_func != sk_run_filter) + module_free(NULL, fp->bpf_func); } diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 3aa37669ff8c..24565a7ffe6d 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -25,6 +25,7 @@ config TILE select HAVE_ARCH_TRACEHOOK select HAVE_SYSCALL_TRACEPOINTS select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select HAVE_DEBUG_STACKOVERFLOW # FIXME: investigate whether we need/want these options. 
# select HAVE_IOREMAP_PROT diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug index ddbfc3322d7f..9165ea979e85 100644 --- a/arch/tile/Kconfig.debug +++ b/arch/tile/Kconfig.debug @@ -14,13 +14,6 @@ config EARLY_PRINTK with klogd/syslogd. You should normally N here, unless you want to debug such a crash. -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - help - This option will cause messages to be printed if free stack space - drops below a certain limit. - config DEBUG_EXTRA_FLAGS string "Additional compiler arguments when building with '-g'" depends on DEBUG_INFO diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h index d062d463fca9..7d8a935a9238 100644 --- a/arch/tile/include/asm/sections.h +++ b/arch/tile/include/asm/sections.h @@ -34,7 +34,7 @@ extern char __sys_cmpxchg_grab_lock[]; extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; #endif -/* Handle the discontiguity between _sdata and _stext. */ +/* Handle the discontiguity between _sdata and _text. */ static inline int arch_is_kernel_data(unsigned long addr) { return addr >= (unsigned long)_sdata && diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 7a5aa1a7864e..68b542677f6a 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -307,8 +307,8 @@ static void __cpuinit store_permanent_mappings(void) hv_store_mapping(addr, pages << PAGE_SHIFT, pa); } - hv_store_mapping((HV_VirtAddr)_stext, - (uint32_t)(_einittext - _stext), 0); + hv_store_mapping((HV_VirtAddr)_text, + (uint32_t)(_einittext - _text), 0); } /* @@ -329,6 +329,7 @@ static void __init setup_memory(void) #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) long lowmem_pages; #endif + unsigned long physpages = 0; /* We are using a char to hold the cpu_2_node[] mapping */ BUILD_BUG_ON(MAX_NUMNODES > 127); @@ -388,8 +389,8 @@ static void __init setup_memory(void) continue; } } - if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { - int max_size = maxmem_pfn - num_physpages; + if (physpages + PFN_DOWN(range.size) > maxmem_pfn) { + int max_size = maxmem_pfn - physpages; if (max_size > 0) { pr_err("Maxmem reduced node %d to %d pages\n", i, max_size); @@ -446,7 +447,7 @@ static void __init setup_memory(void) node_start_pfn[i] = start; node_end_pfn[i] = end; node_controller[i] = range.controller; - num_physpages += size; + physpages += size; max_pfn = end; /* Mark node as online */ @@ -465,7 +466,7 @@ static void __init setup_memory(void) * we're willing to use at 8 million pages (32GB of 4KB pages). */ cap = 8 * 1024 * 1024; /* 8 million pages */ - if (num_physpages > cap) { + if (physpages > cap) { int num_nodes = num_online_nodes(); int cap_each = cap / num_nodes; unsigned long dropped_pages = 0; @@ -476,10 +477,10 @@ static void __init setup_memory(void) node_end_pfn[i] = node_start_pfn[i] + cap_each; } } - num_physpages -= dropped_pages; + physpages -= dropped_pages; pr_warning("Only using %ldMB memory;" " ignoring %ldMB.\n", - num_physpages >> (20 - PAGE_SHIFT), + physpages >> (20 - PAGE_SHIFT), dropped_pages >> (20 - PAGE_SHIFT)); pr_warning("Consider using a larger page size.\n"); } @@ -497,7 +498,7 @@ static void __init setup_memory(void) lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? MAXMEM_PFN : mappable_physpages; - highmem_pages = (long) (num_physpages - lowmem_pages); + highmem_pages = (long) (physpages - lowmem_pages); pr_notice("%ldMB HIGHMEM available.\n", pages_to_mb(highmem_pages > 0 ? 
highmem_pages : 0)); @@ -514,7 +515,6 @@ static void __init setup_memory(void) pr_warning("Use a HIGHMEM enabled kernel.\n"); max_low_pfn = MAXMEM_PFN; max_pfn = MAXMEM_PFN; - num_physpages = MAXMEM_PFN; node_end_pfn[0] = MAXMEM_PFN; } else { pr_notice("%ldMB memory available.\n", diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 631f10de12fe..a13ed902afbb 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ b/arch/tile/kernel/vmlinux.lds.S @@ -27,7 +27,6 @@ SECTIONS .intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */ { _text = .; - _stext = .; *(.intrpt1) } :intrpt1 =0 @@ -36,6 +35,7 @@ SECTIONS /* Now the real code */ . = ALIGN(0x20000); + _stext = .; .text : AT (ADDR(.text) - LOAD_OFFSET) { HEAD_TEXT SCHED_TEXT @@ -58,11 +58,13 @@ SECTIONS #define LOAD_OFFSET PAGE_OFFSET . = ALIGN(PAGE_SIZE); + __init_begin = .; VMLINUX_SYMBOL(_sinitdata) = .; INIT_DATA_SECTION(16) :data =0 PERCPU_SECTION(L2_CACHE_BYTES) . = ALIGN(PAGE_SIZE); VMLINUX_SYMBOL(_einitdata) = .; + __init_end = .; _sdata = .; /* Start of data section */ diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 3d2b81c163a6..f7f99f90cbe0 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -573,10 +573,10 @@ out_of_memory: down_read(&mm->mmap_sem); goto survive; } - pr_alert("VM: killing process %s\n", tsk->comm); - if (!is_kernel_mode) - do_group_exit(SIGKILL); - goto no_context; + if (is_kernel_mode) + goto no_context; + pagefault_out_of_memory(); + return 0; do_sigbus: up_read(&mm->mmap_sem); diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 2749515a0547..e182958c707d 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -562,7 +562,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) prot = ktext_set_nocache(prot); } - BUG_ON(address != (unsigned long)_stext); + BUG_ON(address != (unsigned long)_text); pte = NULL; for (; address < (unsigned long)_einittext; pfn++, address += PAGE_SIZE) { @@ -720,7 +720,7 @@ static void __init init_free_pfn_range(unsigned long start, unsigned long end) } init_page_count(page); __free_pages(page, order); - totalram_pages += count; + adjust_managed_page_count(page, count); page += count; pfn += count; @@ -821,7 +821,6 @@ static void __init set_max_mapnr_init(void) void __init mem_init(void) { - int codesize, datasize, initsize; int i; #ifndef __tilegx__ void *last; @@ -846,26 +845,14 @@ void __init mem_init(void) set_max_mapnr_init(); /* this will put all bootmem onto the freelists */ - totalram_pages += free_all_bootmem(); + free_all_bootmem(); #ifndef CONFIG_64BIT /* count all remaining LOWMEM and give all HIGHMEM to page allocator */ set_non_bootmem_pages_init(); #endif - codesize = (unsigned long)&_etext - (unsigned long)&_text; - datasize = (unsigned long)&_end - (unsigned long)&_sdata; - initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext; - initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata; - - pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - datasize >> 10, - initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) - ); + mem_init_print_info(NULL); /* * In debug mode, dump some interesting memory mappings. 
@@ -1024,16 +1011,13 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end) pte_clear(&init_mm, addr, ptep); continue; } - __ClearPageReserved(page); - init_page_count(page); if (pte_huge(*ptep)) BUG_ON(!kdata_huge); else set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; + free_reserved_page(page); } pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10); } diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c index f96f4cec602a..d67d91ebf63e 100644 --- a/arch/tile/mm/mmap.c +++ b/arch/tile/mm/mmap.c @@ -66,10 +66,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(mm); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index 4938de5512d2..1dd5bd8a8c59 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -57,7 +57,6 @@ *(.uml.initcall.init) __uml_initcall_end = .; } - __init_end = .; SECURITY_INIT diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index fb8fd6fb6563..adde088aeeff 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -14,8 +14,6 @@ SECTIONS __binary_start = .; . = ALIGN(4096); /* Init code and data */ _text = .; - _stext = .; - __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) . = ALIGN(PAGE_SIZE); @@ -67,6 +65,7 @@ SECTIONS } =0x90909090 .plt : { *(.plt) } .text : { + _stext = .; TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -91,7 +90,9 @@ SECTIONS #include <asm/common.lds.S> + __init_begin = .; init.data : { INIT_DATA } + __init_end = .; /* Ensure the __preinit_array_start label is properly aligned. We could instead move the label definition inside the section, but @@ -155,6 +156,7 @@ SECTIONS . = ALIGN(32 / 8); . = ALIGN(32 / 8); } + __bss_stop = .; _end = .; PROVIDE (end = .); diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9df292b270a8..7ddb64baf327 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -65,15 +65,13 @@ void __init mem_init(void) uml_reserved = brk_end; /* this will put all low memory onto the freelists */ - totalram_pages = free_all_bootmem(); + free_all_bootmem(); max_low_pfn = totalram_pages; #ifdef CONFIG_HIGHMEM setup_highmem(end_iomem, highmem); #endif - num_physpages = totalram_pages; max_pfn = totalram_pages; - printk(KERN_INFO "Memory: %luk available\n", - nr_free_pages() << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); kmalloc_ok = 1; } @@ -244,7 +242,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index ff65fb4f1a95..6899195602b7 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -20,13 +20,12 @@ SECTIONS . = START + SIZEOF_HEADERS; _text = .; - _stext = .; - __init_begin = .; INIT_TEXT_SECTION(0) . 
= ALIGN(PAGE_SIZE); .text : { + _stext = .; TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -62,7 +61,10 @@ SECTIONS #include <asm/common.lds.S> + __init_begin = .; init.data : { INIT_DATA } + __init_end = .; + .data : { INIT_TASK_DATA(KERNEL_STACK_SIZE) @@ -97,6 +99,7 @@ SECTIONS PROVIDE(_bss_start = .); SBSS(0) BSS(0) + __bss_stop = .; _end = .; PROVIDE (end = .); diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h index 5eddb997defe..debafc40200a 100644 --- a/arch/unicore32/include/asm/memory.h +++ b/arch/unicore32/include/asm/memory.h @@ -98,12 +98,6 @@ /* * Conversion between a struct page and a physical address. * - * Note: when converting an unknown physical address to a - * struct page, the resulting pointer must be validated - * using VALID_PAGE(). It must return an invalid struct page - * for any physical address not corresponding to a system - * RAM address. - * * page_to_pfn(page) convert a struct page * to a PFN number * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * * diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c index ef69c0c82825..374a055a8e6b 100644 --- a/arch/unicore32/kernel/pci.c +++ b/arch/unicore32/kernel/pci.c @@ -277,11 +277,6 @@ static int __init pci_common_init(void) pci_bus_assign_resources(puv3_bus); } - /* - * Tell drivers about devices found. - */ - pci_bus_add_devices(puv3_bus); - return 0; } subsys_initcall(pci_common_init); diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index c9447691bdac..778ebba80827 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -51,16 +51,6 @@ void arch_cpu_idle(void) local_irq_enable(); } -static char reboot_mode = 'h'; - -int __init reboot_setup(char *str) -{ - reboot_mode = str[0]; - return 1; -} - -__setup("reboot=", reboot_setup); - void machine_halt(void) { gpio_set_value(GPO_SOFT_OFF, 0); @@ -88,7 +78,7 @@ void machine_restart(char *cmd) * we may need it to insert some 1:1 mappings so that * soft boot works. */ - setup_mm_for_reboot(reboot_mode); + setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); @@ -102,7 +92,7 @@ void machine_restart(char *cmd) /* * Now handle reboot code. 
*/ - if (reboot_mode == 's') { + if (reboot_mode == REBOOT_SOFT) { /* Jump into ROM at address 0xffff0000 */ cpu_reset(VECTORS_BASE); } else { diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index 30f749da8f73..f5c51b85ad24 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h @@ -22,7 +22,7 @@ extern void puv3_ps2_init(void); extern void pci_puv3_preinit(void); extern void __init puv3_init_gpio(void); -extern void setup_mm_for_reboot(char mode); +extern void setup_mm_for_reboot(void); extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index 63df12d71ce3..ae6bc036db92 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -383,59 +383,14 @@ static void __init free_unused_memmap(struct meminfo *mi) */ void __init mem_init(void) { - unsigned long reserved_pages, free_pages; - struct memblock_region *reg; - int i; - max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; free_unused_memmap(&meminfo); /* this will put all unused low memory onto the freelists */ - totalram_pages += free_all_bootmem(); - - reserved_pages = free_pages = 0; - - for_each_bank(i, &meminfo) { - struct membank *bank = &meminfo.bank[i]; - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = bank_pfn_start(bank); - pfn2 = bank_pfn_end(bank); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - if (PageReserved(page)) - reserved_pages++; - else if (!page_count(page)) - free_pages++; - page++; - } while (page < end); - } - - /* - * Since our memory may not be contiguous, calculate the - * real number of pages we have in this system - */ - printk(KERN_INFO "Memory:"); - num_physpages = 0; - for_each_memblock(memory, reg) { - unsigned long pages = memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - num_physpages += pages; - printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); - } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - - printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", - nr_free_pages() << (PAGE_SHIFT-10), - free_pages << (PAGE_SHIFT-10), - reserved_pages << (PAGE_SHIFT-10), - totalhigh_pages << (PAGE_SHIFT-10)); + free_all_bootmem(); + mem_init_print_info(NULL); printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" @@ -464,7 +419,7 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); BUG_ON(TASK_SIZE > MODULES_VADDR); - if (PAGE_SIZE >= 16384 && num_physpages <= 128) { + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { /* * On a machine this small we won't get * anywhere without overcommit, so turn @@ -476,7 +431,7 @@ void __init mem_init(void) void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD @@ -486,7 +441,7 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c index 43c20b40e444..4f5a532bee13 100644 --- a/arch/unicore32/mm/mmu.c +++ b/arch/unicore32/mm/mmu.c @@ -445,7 +445,7 @@ void __init paging_init(void) * the user-mode pages. 
This will then ensure that we have predictable * results when turning the mmu off */ -void setup_mm_for_reboot(char mode) +void setup_mm_for_reboot(void) { unsigned long base_pmdval; pgd_t *pgd; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b094816a7e0f..b32ebf92b0ce 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -65,6 +65,7 @@ config X86 select HAVE_KERNEL_LZMA select HAVE_KERNEL_XZ select HAVE_KERNEL_LZO + select HAVE_KERNEL_LZ4 select HAVE_HW_BREAKPOINT select HAVE_MIXED_BREAKPOINTS_REGS select PERF_EVENTS @@ -102,6 +103,7 @@ config X86 select HAVE_ARCH_SECCOMP_FILTER select BUILDTIME_EXTABLE_SORT select GENERIC_CMOS_UPDATE + select HAVE_ARCH_SOFT_DIRTY select CLOCKSOURCE_WATCHDOG select GENERIC_CLOCKEVENTS select ARCH_CLOCKSOURCE_DATA if X86_64 @@ -121,6 +123,7 @@ config X86 select OLD_SIGACTION if X86_32 select COMPAT_OLD_SIGACTION if IA32_EMULATION select RTC_LIB + select HAVE_DEBUG_STACKOVERFLOW config INSTRUCTION_DECODER def_bool y @@ -2258,11 +2261,11 @@ source "drivers/pcmcia/Kconfig" source "drivers/pci/hotplug/Kconfig" config RAPIDIO - bool "RapidIO support" + tristate "RapidIO support" depends on PCI default n help - If you say Y here, the kernel will include drivers and + If enabled this option will include drivers and the core infrastructure code to support RapidIO interconnect devices. source "drivers/rapidio/Kconfig" diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index c963881de0d0..78d91afb8e50 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -59,16 +59,6 @@ config EARLY_PRINTK_DBGP with klogd/syslogd or the X server. You should normally N here, unless you want to debug such a crash. You need usb debug device. -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - ---help--- - Say Y here if you want to check the overflows of kernel, IRQ - and exception stacks. This option will cause messages of the - stacks in detail when free stack space drops below a certain - limit. - If in doubt, say "N". 
- config X86_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 5ef205c5f37b..dcd90df10ab4 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,8 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ + vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -63,12 +64,15 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE $(call if_changed,xzkern) $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzo) +$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE + $(call if_changed,lz4) suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 suffix-$(CONFIG_KERNEL_LZMA) := lzma suffix-$(CONFIG_KERNEL_XZ) := xz suffix-$(CONFIG_KERNEL_LZO) := lzo +suffix-$(CONFIG_KERNEL_LZ4) := lz4 quiet_cmd_mkpiggy = MKPIGGY $@ cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 7cb56c6ca351..0319c88290a5 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -145,6 +145,10 @@ static int lines, cols; #include "../../../../lib/decompress_unlzo.c" #endif +#ifdef CONFIG_KERNEL_LZ4 +#include "../../../../lib/decompress_unlz4.c" +#endif + static void scroll(void) { int i; diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a3a0ed80f17c..7d6ba9db1be9 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -3,8 +3,6 @@ # avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o @@ -29,6 +27,7 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o +obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o # These modules require assembler to support AVX. ifeq ($(avx_supported),yes) @@ -42,10 +41,8 @@ endif # These modules require assembler to support AVX2. 
ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_BLOWFISH_AVX2_X86_64) += blowfish-avx2.o obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o - obj-$(CONFIG_CRYPTO_TWOFISH_AVX2_X86_64) += twofish-avx2.o endif aes-i586-y := aes-i586-asm_32.o aes_glue.o @@ -73,10 +70,8 @@ ifeq ($(avx_supported),yes) endif ifeq ($(avx2_supported),yes) - blowfish-avx2-y := blowfish-avx2-asm_64.o blowfish_avx2_glue.o camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o - twofish-avx2-y := twofish-avx2-asm_64.o twofish_avx2_glue.o endif aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o @@ -87,3 +82,4 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o +crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S deleted file mode 100644 index 784452e0d05d..000000000000 --- a/arch/x86/crypto/blowfish-avx2-asm_64.S +++ /dev/null @@ -1,449 +0,0 @@ -/* - * x86_64/AVX2 assembler optimized version of Blowfish - * - * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ - -#include <linux/linkage.h> - -.file "blowfish-avx2-asm_64.S" - -.data -.align 32 - -.Lprefetch_mask: -.long 0*64 -.long 1*64 -.long 2*64 -.long 3*64 -.long 4*64 -.long 5*64 -.long 6*64 -.long 7*64 - -.Lbswap32_mask: -.long 0x00010203 -.long 0x04050607 -.long 0x08090a0b -.long 0x0c0d0e0f - -.Lbswap128_mask: - .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.Lbswap_iv_mask: - .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0 - -.text -/* structure of crypto context */ -#define p 0 -#define s0 ((16 + 2) * 4) -#define s1 ((16 + 2 + (1 * 256)) * 4) -#define s2 ((16 + 2 + (2 * 256)) * 4) -#define s3 ((16 + 2 + (3 * 256)) * 4) - -/* register macros */ -#define CTX %rdi -#define RIO %rdx - -#define RS0 %rax -#define RS1 %r8 -#define RS2 %r9 -#define RS3 %r10 - -#define RLOOP %r11 -#define RLOOPd %r11d - -#define RXr0 %ymm8 -#define RXr1 %ymm9 -#define RXr2 %ymm10 -#define RXr3 %ymm11 -#define RXl0 %ymm12 -#define RXl1 %ymm13 -#define RXl2 %ymm14 -#define RXl3 %ymm15 - -/* temp regs */ -#define RT0 %ymm0 -#define RT0x %xmm0 -#define RT1 %ymm1 -#define RT1x %xmm1 -#define RIDX0 %ymm2 -#define RIDX1 %ymm3 -#define RIDX1x %xmm3 -#define RIDX2 %ymm4 -#define RIDX3 %ymm5 - -/* vpgatherdd mask and '-1' */ -#define RNOT %ymm6 - -/* byte mask, (-1 >> 24) */ -#define RBYTE %ymm7 - -/*********************************************************************** - * 32-way AVX2 blowfish - ***********************************************************************/ -#define F(xl, xr) \ - vpsrld $24, xl, RIDX0; \ - vpsrld $16, xl, RIDX1; \ - vpsrld $8, xl, RIDX2; \ - vpand RBYTE, RIDX1, RIDX1; \ - vpand RBYTE, RIDX2, RIDX2; \ - vpand RBYTE, xl, RIDX3; \ - \ - vpgatherdd RNOT, (RS0, RIDX0, 4), RT0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpcmpeqd RIDX0, RIDX0, RIDX0; \ - \ - 
vpgatherdd RNOT, (RS1, RIDX1, 4), RT1; \ - vpcmpeqd RIDX1, RIDX1, RIDX1; \ - vpaddd RT0, RT1, RT0; \ - \ - vpgatherdd RIDX0, (RS2, RIDX2, 4), RT1; \ - vpxor RT0, RT1, RT0; \ - \ - vpgatherdd RIDX1, (RS3, RIDX3, 4), RT1; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpaddd RT0, RT1, RT0; \ - \ - vpxor RT0, xr, xr; - -#define add_roundkey(xl, nmem) \ - vpbroadcastd nmem, RT0; \ - vpxor RT0, xl ## 0, xl ## 0; \ - vpxor RT0, xl ## 1, xl ## 1; \ - vpxor RT0, xl ## 2, xl ## 2; \ - vpxor RT0, xl ## 3, xl ## 3; - -#define round_enc() \ - add_roundkey(RXr, p(CTX,RLOOP,4)); \ - F(RXl0, RXr0); \ - F(RXl1, RXr1); \ - F(RXl2, RXr2); \ - F(RXl3, RXr3); \ - \ - add_roundkey(RXl, p+4(CTX,RLOOP,4)); \ - F(RXr0, RXl0); \ - F(RXr1, RXl1); \ - F(RXr2, RXl2); \ - F(RXr3, RXl3); - -#define round_dec() \ - add_roundkey(RXr, p+4*2(CTX,RLOOP,4)); \ - F(RXl0, RXr0); \ - F(RXl1, RXr1); \ - F(RXl2, RXr2); \ - F(RXl3, RXr3); \ - \ - add_roundkey(RXl, p+4(CTX,RLOOP,4)); \ - F(RXr0, RXl0); \ - F(RXr1, RXl1); \ - F(RXr2, RXl2); \ - F(RXr3, RXl3); - -#define init_round_constants() \ - vpcmpeqd RNOT, RNOT, RNOT; \ - leaq s0(CTX), RS0; \ - leaq s1(CTX), RS1; \ - leaq s2(CTX), RS2; \ - leaq s3(CTX), RS3; \ - vpsrld $24, RNOT, RBYTE; - -#define transpose_2x2(x0, x1, t0) \ - vpunpckldq x0, x1, t0; \ - vpunpckhdq x0, x1, x1; \ - \ - vpunpcklqdq t0, x1, x0; \ - vpunpckhqdq t0, x1, x1; - -#define read_block(xl, xr) \ - vbroadcasti128 .Lbswap32_mask, RT1; \ - \ - vpshufb RT1, xl ## 0, xl ## 0; \ - vpshufb RT1, xr ## 0, xr ## 0; \ - vpshufb RT1, xl ## 1, xl ## 1; \ - vpshufb RT1, xr ## 1, xr ## 1; \ - vpshufb RT1, xl ## 2, xl ## 2; \ - vpshufb RT1, xr ## 2, xr ## 2; \ - vpshufb RT1, xl ## 3, xl ## 3; \ - vpshufb RT1, xr ## 3, xr ## 3; \ - \ - transpose_2x2(xl ## 0, xr ## 0, RT0); \ - transpose_2x2(xl ## 1, xr ## 1, RT0); \ - transpose_2x2(xl ## 2, xr ## 2, RT0); \ - transpose_2x2(xl ## 3, xr ## 3, RT0); - -#define write_block(xl, xr) \ - vbroadcasti128 .Lbswap32_mask, RT1; \ - \ - transpose_2x2(xl ## 0, xr ## 0, RT0); \ - transpose_2x2(xl ## 1, xr ## 1, RT0); \ - transpose_2x2(xl ## 2, xr ## 2, RT0); \ - transpose_2x2(xl ## 3, xr ## 3, RT0); \ - \ - vpshufb RT1, xl ## 0, xl ## 0; \ - vpshufb RT1, xr ## 0, xr ## 0; \ - vpshufb RT1, xl ## 1, xl ## 1; \ - vpshufb RT1, xr ## 1, xr ## 1; \ - vpshufb RT1, xl ## 2, xl ## 2; \ - vpshufb RT1, xr ## 2, xr ## 2; \ - vpshufb RT1, xl ## 3, xl ## 3; \ - vpshufb RT1, xr ## 3, xr ## 3; - -.align 8 -__blowfish_enc_blk32: - /* input: - * %rdi: ctx, CTX - * RXl0..4, RXr0..4: plaintext - * output: - * RXl0..4, RXr0..4: ciphertext (RXl <=> RXr swapped) - */ - init_round_constants(); - - read_block(RXl, RXr); - - movl $1, RLOOPd; - add_roundkey(RXl, p+4*(0)(CTX)); - -.align 4 -.L__enc_loop: - round_enc(); - - leal 2(RLOOPd), RLOOPd; - cmpl $17, RLOOPd; - jne .L__enc_loop; - - add_roundkey(RXr, p+4*(17)(CTX)); - - write_block(RXl, RXr); - - ret; -ENDPROC(__blowfish_enc_blk32) - -.align 8 -__blowfish_dec_blk32: - /* input: - * %rdi: ctx, CTX - * RXl0..4, RXr0..4: ciphertext - * output: - * RXl0..4, RXr0..4: plaintext (RXl <=> RXr swapped) - */ - init_round_constants(); - - read_block(RXl, RXr); - - movl $14, RLOOPd; - add_roundkey(RXl, p+4*(17)(CTX)); - -.align 4 -.L__dec_loop: - round_dec(); - - addl $-2, RLOOPd; - jns .L__dec_loop; - - add_roundkey(RXr, p+4*(0)(CTX)); - - write_block(RXl, RXr); - - ret; -ENDPROC(__blowfish_dec_blk32) - -ENTRY(blowfish_ecb_enc_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - */ - - vzeroupper; - - vmovdqu 0*32(%rdx), RXl0; - vmovdqu 1*32(%rdx), RXr0; - vmovdqu 
2*32(%rdx), RXl1; - vmovdqu 3*32(%rdx), RXr1; - vmovdqu 4*32(%rdx), RXl2; - vmovdqu 5*32(%rdx), RXr2; - vmovdqu 6*32(%rdx), RXl3; - vmovdqu 7*32(%rdx), RXr3; - - call __blowfish_enc_blk32; - - vmovdqu RXr0, 0*32(%rsi); - vmovdqu RXl0, 1*32(%rsi); - vmovdqu RXr1, 2*32(%rsi); - vmovdqu RXl1, 3*32(%rsi); - vmovdqu RXr2, 4*32(%rsi); - vmovdqu RXl2, 5*32(%rsi); - vmovdqu RXr3, 6*32(%rsi); - vmovdqu RXl3, 7*32(%rsi); - - vzeroupper; - - ret; -ENDPROC(blowfish_ecb_enc_32way) - -ENTRY(blowfish_ecb_dec_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - */ - - vzeroupper; - - vmovdqu 0*32(%rdx), RXl0; - vmovdqu 1*32(%rdx), RXr0; - vmovdqu 2*32(%rdx), RXl1; - vmovdqu 3*32(%rdx), RXr1; - vmovdqu 4*32(%rdx), RXl2; - vmovdqu 5*32(%rdx), RXr2; - vmovdqu 6*32(%rdx), RXl3; - vmovdqu 7*32(%rdx), RXr3; - - call __blowfish_dec_blk32; - - vmovdqu RXr0, 0*32(%rsi); - vmovdqu RXl0, 1*32(%rsi); - vmovdqu RXr1, 2*32(%rsi); - vmovdqu RXl1, 3*32(%rsi); - vmovdqu RXr2, 4*32(%rsi); - vmovdqu RXl2, 5*32(%rsi); - vmovdqu RXr3, 6*32(%rsi); - vmovdqu RXl3, 7*32(%rsi); - - vzeroupper; - - ret; -ENDPROC(blowfish_ecb_dec_32way) - -ENTRY(blowfish_cbc_dec_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - */ - - vzeroupper; - - vmovdqu 0*32(%rdx), RXl0; - vmovdqu 1*32(%rdx), RXr0; - vmovdqu 2*32(%rdx), RXl1; - vmovdqu 3*32(%rdx), RXr1; - vmovdqu 4*32(%rdx), RXl2; - vmovdqu 5*32(%rdx), RXr2; - vmovdqu 6*32(%rdx), RXl3; - vmovdqu 7*32(%rdx), RXr3; - - call __blowfish_dec_blk32; - - /* xor with src */ - vmovq (%rdx), RT0x; - vpshufd $0x4f, RT0x, RT0x; - vinserti128 $1, 8(%rdx), RT0, RT0; - vpxor RT0, RXr0, RXr0; - vpxor 0*32+24(%rdx), RXl0, RXl0; - vpxor 1*32+24(%rdx), RXr1, RXr1; - vpxor 2*32+24(%rdx), RXl1, RXl1; - vpxor 3*32+24(%rdx), RXr2, RXr2; - vpxor 4*32+24(%rdx), RXl2, RXl2; - vpxor 5*32+24(%rdx), RXr3, RXr3; - vpxor 6*32+24(%rdx), RXl3, RXl3; - - vmovdqu RXr0, (0*32)(%rsi); - vmovdqu RXl0, (1*32)(%rsi); - vmovdqu RXr1, (2*32)(%rsi); - vmovdqu RXl1, (3*32)(%rsi); - vmovdqu RXr2, (4*32)(%rsi); - vmovdqu RXl2, (5*32)(%rsi); - vmovdqu RXr3, (6*32)(%rsi); - vmovdqu RXl3, (7*32)(%rsi); - - vzeroupper; - - ret; -ENDPROC(blowfish_cbc_dec_32way) - -ENTRY(blowfish_ctr_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (big endian, 64bit) - */ - - vzeroupper; - - vpcmpeqd RT0, RT0, RT0; - vpsrldq $8, RT0, RT0; /* a: -1, b: 0, c: -1, d: 0 */ - - vpcmpeqd RT1x, RT1x, RT1x; - vpaddq RT1x, RT1x, RT1x; /* a: -2, b: -2 */ - vpxor RIDX0, RIDX0, RIDX0; - vinserti128 $1, RT1x, RIDX0, RIDX0; /* a: 0, b: 0, c: -2, d: -2 */ - - vpaddq RIDX0, RT0, RT0; /* a: -1, b: 0, c: -3, d: -2 */ - - vpcmpeqd RT1, RT1, RT1; - vpaddq RT1, RT1, RT1; /* a: -2, b: -2, c: -2, d: -2 */ - vpaddq RT1, RT1, RIDX2; /* a: -4, b: -4, c: -4, d: -4 */ - - vbroadcasti128 .Lbswap_iv_mask, RIDX0; - vbroadcasti128 .Lbswap128_mask, RIDX1; - - /* load IV and byteswap */ - vmovq (%rcx), RT1x; - vinserti128 $1, RT1x, RT1, RT1; /* a: BE, b: 0, c: BE, d: 0 */ - vpshufb RIDX0, RT1, RT1; /* a: LE, b: LE, c: LE, d: LE */ - - /* construct IVs */ - vpsubq RT0, RT1, RT1; /* a: le1, b: le0, c: le3, d: le2 */ - vpshufb RIDX1, RT1, RXl0; /* a: be0, b: be1, c: be2, d: be3 */ - vpsubq RIDX2, RT1, RT1; /* le5, le4, le7, le6 */ - vpshufb RIDX1, RT1, RXr0; /* be4, be5, be6, be7 */ - vpsubq RIDX2, RT1, RT1; - vpshufb RIDX1, RT1, RXl1; - vpsubq RIDX2, RT1, RT1; - vpshufb RIDX1, RT1, RXr1; - vpsubq RIDX2, RT1, RT1; - vpshufb RIDX1, RT1, RXl2; - vpsubq RIDX2, RT1, RT1; - vpshufb RIDX1, RT1, RXr2; - vpsubq RIDX2, RT1, RT1; - vpshufb 
RIDX1, RT1, RXl3; - vpsubq RIDX2, RT1, RT1; - vpshufb RIDX1, RT1, RXr3; - - /* store last IV */ - vpsubq RIDX2, RT1, RT1; /* a: le33, b: le32, ... */ - vpshufb RIDX1x, RT1x, RT1x; /* a: be32, ... */ - vmovq RT1x, (%rcx); - - call __blowfish_enc_blk32; - - /* dst = src ^ iv */ - vpxor 0*32(%rdx), RXr0, RXr0; - vpxor 1*32(%rdx), RXl0, RXl0; - vpxor 2*32(%rdx), RXr1, RXr1; - vpxor 3*32(%rdx), RXl1, RXl1; - vpxor 4*32(%rdx), RXr2, RXr2; - vpxor 5*32(%rdx), RXl2, RXl2; - vpxor 6*32(%rdx), RXr3, RXr3; - vpxor 7*32(%rdx), RXl3, RXl3; - vmovdqu RXr0, (0*32)(%rsi); - vmovdqu RXl0, (1*32)(%rsi); - vmovdqu RXr1, (2*32)(%rsi); - vmovdqu RXl1, (3*32)(%rsi); - vmovdqu RXr2, (4*32)(%rsi); - vmovdqu RXl2, (5*32)(%rsi); - vmovdqu RXr3, (6*32)(%rsi); - vmovdqu RXl3, (7*32)(%rsi); - - vzeroupper; - - ret; -ENDPROC(blowfish_ctr_32way) diff --git a/arch/x86/crypto/blowfish_avx2_glue.c b/arch/x86/crypto/blowfish_avx2_glue.c deleted file mode 100644 index 4417e9aea78d..000000000000 --- a/arch/x86/crypto/blowfish_avx2_glue.c +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Glue Code for x86_64/AVX2 assembler optimized version of Blowfish - * - * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> - * - * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: - * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> -#include <linux/err.h> -#include <crypto/algapi.h> -#include <crypto/blowfish.h> -#include <crypto/cryptd.h> -#include <crypto/ctr.h> -#include <asm/i387.h> -#include <asm/xcr.h> -#include <asm/xsave.h> -#include <asm/crypto/blowfish.h> -#include <asm/crypto/ablk_helper.h> -#include <crypto/scatterwalk.h> - -#define BF_AVX2_PARALLEL_BLOCKS 32 - -/* 32-way AVX2 parallel cipher functions */ -asmlinkage void blowfish_ecb_enc_32way(struct bf_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void blowfish_ecb_dec_32way(struct bf_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void blowfish_cbc_dec_32way(struct bf_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void blowfish_ctr_32way(struct bf_ctx *ctx, u8 *dst, const u8 *src, - __be64 *iv); - -static inline bool bf_fpu_begin(bool fpu_enabled, unsigned int nbytes) -{ - if (fpu_enabled) - return true; - - /* FPU is only used when chunk to be processed is large enough, so - * do not enable FPU until it is necessary. 
- */ - if (nbytes < BF_BLOCK_SIZE * BF_AVX2_PARALLEL_BLOCKS) - return false; - - kernel_fpu_begin(); - return true; -} - -static inline void bf_fpu_end(bool fpu_enabled) -{ - if (fpu_enabled) - kernel_fpu_end(); -} - -static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - bool enc) -{ - bool fpu_enabled = false; - struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = BF_BLOCK_SIZE; - unsigned int nbytes; - int err; - - err = blkcipher_walk_virt(desc, walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk->nbytes)) { - u8 *wsrc = walk->src.virt.addr; - u8 *wdst = walk->dst.virt.addr; - - fpu_enabled = bf_fpu_begin(fpu_enabled, nbytes); - - /* Process multi-block AVX2 batch */ - if (nbytes >= bsize * BF_AVX2_PARALLEL_BLOCKS) { - do { - if (enc) - blowfish_ecb_enc_32way(ctx, wdst, wsrc); - else - blowfish_ecb_dec_32way(ctx, wdst, wsrc); - - wsrc += bsize * BF_AVX2_PARALLEL_BLOCKS; - wdst += bsize * BF_AVX2_PARALLEL_BLOCKS; - nbytes -= bsize * BF_AVX2_PARALLEL_BLOCKS; - } while (nbytes >= bsize * BF_AVX2_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Process multi-block batch */ - if (nbytes >= bsize * BF_PARALLEL_BLOCKS) { - do { - if (enc) - blowfish_enc_blk_4way(ctx, wdst, wsrc); - else - blowfish_dec_blk_4way(ctx, wdst, wsrc); - - wsrc += bsize * BF_PARALLEL_BLOCKS; - wdst += bsize * BF_PARALLEL_BLOCKS; - nbytes -= bsize * BF_PARALLEL_BLOCKS; - } while (nbytes >= bsize * BF_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - if (enc) - blowfish_enc_blk(ctx, wdst, wsrc); - else - blowfish_dec_blk(ctx, wdst, wsrc); - - wsrc += bsize; - wdst += bsize; - nbytes -= bsize; - } while (nbytes >= bsize); - -done: - err = blkcipher_walk_done(desc, walk, nbytes); - } - - bf_fpu_end(fpu_enabled); - return err; -} - -static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_crypt(desc, &walk, true); -} - -static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_crypt(desc, &walk, false); -} - -static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, - struct blkcipher_walk *walk) -{ - struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - unsigned int bsize = BF_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 *iv = (u64 *)walk->iv; - - do { - *dst = *src ^ *iv; - blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst); - iv = dst; - - src += 1; - dst += 1; - nbytes -= bsize; - } while (nbytes >= bsize); - - *(u64 *)walk->iv = *iv; - return nbytes; -} - -static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct blkcipher_walk walk; - int err; - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - - while ((nbytes = walk.nbytes)) { - nbytes = __cbc_encrypt(desc, &walk); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - - return err; -} - -static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, - struct blkcipher_walk *walk) -{ - struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = BF_BLOCK_SIZE; - unsigned 
int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 last_iv; - int i; - - /* Start of the last block. */ - src += nbytes / bsize - 1; - dst += nbytes / bsize - 1; - - last_iv = *src; - - /* Process multi-block AVX2 batch */ - if (nbytes >= bsize * BF_AVX2_PARALLEL_BLOCKS) { - do { - nbytes -= bsize * (BF_AVX2_PARALLEL_BLOCKS - 1); - src -= BF_AVX2_PARALLEL_BLOCKS - 1; - dst -= BF_AVX2_PARALLEL_BLOCKS - 1; - - blowfish_cbc_dec_32way(ctx, (u8 *)dst, (u8 *)src); - - nbytes -= bsize; - if (nbytes < bsize) - goto done; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } while (nbytes >= bsize * BF_AVX2_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Process multi-block batch */ - if (nbytes >= bsize * BF_PARALLEL_BLOCKS) { - u64 ivs[BF_PARALLEL_BLOCKS - 1]; - - do { - nbytes -= bsize * (BF_PARALLEL_BLOCKS - 1); - src -= BF_PARALLEL_BLOCKS - 1; - dst -= BF_PARALLEL_BLOCKS - 1; - - for (i = 0; i < BF_PARALLEL_BLOCKS - 1; i++) - ivs[i] = src[i]; - - blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src); - - for (i = 0; i < BF_PARALLEL_BLOCKS - 1; i++) - dst[i + 1] ^= ivs[i]; - - nbytes -= bsize; - if (nbytes < bsize) - goto done; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } while (nbytes >= bsize * BF_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - for (;;) { - blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src); - - nbytes -= bsize; - if (nbytes < bsize) - break; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } - -done: - *dst ^= *(u64 *)walk->iv; - *(u64 *)walk->iv = last_iv; - - return nbytes; -} - -static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - bool fpu_enabled = false; - struct blkcipher_walk walk; - int err; - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes)) { - fpu_enabled = bf_fpu_begin(fpu_enabled, nbytes); - nbytes = __cbc_decrypt(desc, &walk); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - - bf_fpu_end(fpu_enabled); - return err; -} - -static void ctr_crypt_final(struct blkcipher_desc *desc, - struct blkcipher_walk *walk) -{ - struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - u8 *ctrblk = walk->iv; - u8 keystream[BF_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - blowfish_enc_blk(ctx, keystream, ctrblk); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); - - crypto_inc(ctrblk, BF_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct blkcipher_desc *desc, - struct blkcipher_walk *walk) -{ - struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - unsigned int bsize = BF_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - int i; - - /* Process multi-block AVX2 batch */ - if (nbytes >= bsize * BF_AVX2_PARALLEL_BLOCKS) { - do { - blowfish_ctr_32way(ctx, (u8 *)dst, (u8 *)src, - (__be64 *)walk->iv); - - src += BF_AVX2_PARALLEL_BLOCKS; - dst += BF_AVX2_PARALLEL_BLOCKS; - nbytes -= bsize * BF_AVX2_PARALLEL_BLOCKS; - } while (nbytes >= bsize * BF_AVX2_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Process four block batch */ - if (nbytes >= bsize * BF_PARALLEL_BLOCKS) { - __be64 ctrblocks[BF_PARALLEL_BLOCKS]; - u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); - - do { - /* 
create ctrblks for parallel encrypt */ - for (i = 0; i < BF_PARALLEL_BLOCKS; i++) { - if (dst != src) - dst[i] = src[i]; - - ctrblocks[i] = cpu_to_be64(ctrblk++); - } - - blowfish_enc_blk_xor_4way(ctx, (u8 *)dst, - (u8 *)ctrblocks); - - src += BF_PARALLEL_BLOCKS; - dst += BF_PARALLEL_BLOCKS; - nbytes -= bsize * BF_PARALLEL_BLOCKS; - } while (nbytes >= bsize * BF_PARALLEL_BLOCKS); - - *(__be64 *)walk->iv = cpu_to_be64(ctrblk); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - u64 ctrblk; - - if (dst != src) - *dst = *src; - - ctrblk = *(u64 *)walk->iv; - be64_add_cpu((__be64 *)walk->iv, 1); - - blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk); - - src += 1; - dst += 1; - } while ((nbytes -= bsize) >= bsize); - -done: - return nbytes; -} - -static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - bool fpu_enabled = false; - struct blkcipher_walk walk; - int err; - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) { - fpu_enabled = bf_fpu_begin(fpu_enabled, nbytes); - nbytes = __ctr_crypt(desc, &walk); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - - bf_fpu_end(fpu_enabled); - - if (walk.nbytes) { - ctr_crypt_final(desc, &walk); - err = blkcipher_walk_done(desc, &walk, 0); - } - - return err; -} - -static struct crypto_alg bf_algs[6] = { { - .cra_name = "__ecb-blowfish-avx2", - .cra_driver_name = "__driver-ecb-blowfish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = BF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct bf_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .setkey = blowfish_setkey, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, - }, - }, -}, { - .cra_name = "__cbc-blowfish-avx2", - .cra_driver_name = "__driver-cbc-blowfish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = BF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct bf_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .setkey = blowfish_setkey, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - }, - }, -}, { - .cra_name = "__ctr-blowfish-avx2", - .cra_driver_name = "__driver-ctr-blowfish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct bf_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .ivsize = BF_BLOCK_SIZE, - .setkey = blowfish_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, - }, -}, { - .cra_name = "ecb(blowfish)", - .cra_driver_name = "ecb-blowfish-avx2", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = BF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .setkey = 
ablk_set_key, - .encrypt = ablk_encrypt, - .decrypt = ablk_decrypt, - }, - }, -}, { - .cra_name = "cbc(blowfish)", - .cra_driver_name = "cbc-blowfish-avx2", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = BF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .ivsize = BF_BLOCK_SIZE, - .setkey = ablk_set_key, - .encrypt = __ablk_encrypt, - .decrypt = ablk_decrypt, - }, - }, -}, { - .cra_name = "ctr(blowfish)", - .cra_driver_name = "ctr-blowfish-avx2", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .ivsize = BF_BLOCK_SIZE, - .setkey = ablk_set_key, - .encrypt = ablk_encrypt, - .decrypt = ablk_encrypt, - .geniv = "chainiv", - }, - }, -} }; - - -static int __init init(void) -{ - u64 xcr0; - - if (!cpu_has_avx2 || !cpu_has_osxsave) { - pr_info("AVX2 instructions are not detected.\n"); - return -ENODEV; - } - - xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); - if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { - pr_info("AVX detected but unusable.\n"); - return -ENODEV; - } - - return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs)); -} - -static void __exit fini(void) -{ - crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs)); -} - -module_init(init); -module_exit(fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Blowfish Cipher Algorithm, AVX2 optimized"); -MODULE_ALIAS("blowfish"); -MODULE_ALIAS("blowfish-asm"); diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c index 3548d76dbaa9..50ec333b70e6 100644 --- a/arch/x86/crypto/blowfish_glue.c +++ b/arch/x86/crypto/blowfish_glue.c @@ -1,7 +1,7 @@ /* * Glue Code for assembler optimized version of Blowfish * - * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> @@ -32,24 +32,40 @@ #include <linux/module.h> #include <linux/types.h> #include <crypto/algapi.h> -#include <asm/crypto/blowfish.h> /* regular block cipher functions */ asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src, bool xor); -EXPORT_SYMBOL_GPL(__blowfish_enc_blk); - asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src); -EXPORT_SYMBOL_GPL(blowfish_dec_blk); /* 4-way parallel cipher functions */ asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, const u8 *src, bool xor); -EXPORT_SYMBOL_GPL(__blowfish_enc_blk_4way); - asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst, const u8 *src); -EXPORT_SYMBOL_GPL(blowfish_dec_blk_4way); + +static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src) +{ + __blowfish_enc_blk(ctx, dst, src, false); +} + +static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst, + const u8 *src) +{ + __blowfish_enc_blk(ctx, dst, src, true); +} + +static inline void 
blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, + const u8 *src) +{ + __blowfish_enc_blk_4way(ctx, dst, src, false); +} + +static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst, + const u8 *src) +{ + __blowfish_enc_blk_4way(ctx, dst, src, true); +} static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 91a1878fcc3e..0e0b8863a34b 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -51,16 +51,6 @@ #define ymm14_x xmm14 #define ymm15_x xmm15 -/* - * AES-NI instructions do not support ymmX registers, so we need splitting and - * merging. - */ -#define vaesenclast256(zero, yreg, tmp) \ - vextracti128 $1, yreg, tmp##_x; \ - vaesenclast zero##_x, yreg##_x, yreg##_x; \ - vaesenclast zero##_x, tmp##_x, tmp##_x; \ - vinserti128 $1, tmp##_x, yreg, yreg; - /********************************************************************** 32-way camellia **********************************************************************/ @@ -79,46 +69,70 @@ * S-function with AES subbytes \ */ \ vbroadcasti128 .Linv_shift_row, t4; \ - vpbroadcastb .L0f0f0f0f, t7; \ - vbroadcasti128 .Lpre_tf_lo_s1, t0; \ - vbroadcasti128 .Lpre_tf_hi_s1, t1; \ + vpbroadcastd .L0f0f0f0f, t7; \ + vbroadcasti128 .Lpre_tf_lo_s1, t5; \ + vbroadcasti128 .Lpre_tf_hi_s1, t6; \ + vbroadcasti128 .Lpre_tf_lo_s4, t2; \ + vbroadcasti128 .Lpre_tf_hi_s4, t3; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ - vpshufb t4, x1, x1; \ - vpshufb t4, x4, x4; \ - vpshufb t4, x2, x2; \ - vpshufb t4, x5, x5; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ + vpshufb t4, x2, x2; \ + vpshufb t4, x5, x5; \ + vpshufb t4, x1, x1; \ + vpshufb t4, x4, x4; \ \ /* prefilter sboxes 1, 2 and 3 */ \ - vbroadcasti128 .Lpre_tf_lo_s4, t2; \ - vbroadcasti128 .Lpre_tf_hi_s4, t3; \ - filter_8bit(x0, t0, t1, t7, t6); \ - filter_8bit(x7, t0, t1, t7, t6); \ - filter_8bit(x1, t0, t1, t7, t6); \ - filter_8bit(x4, t0, t1, t7, t6); \ - filter_8bit(x2, t0, t1, t7, t6); \ - filter_8bit(x5, t0, t1, t7, t6); \ - \ /* prefilter sbox 4 */ \ + filter_8bit(x0, t5, t6, t7, t4); \ + filter_8bit(x7, t5, t6, t7, t4); \ + vextracti128 $1, x0, t0##_x; \ + vextracti128 $1, x7, t1##_x; \ + filter_8bit(x3, t2, t3, t7, t4); \ + filter_8bit(x6, t2, t3, t7, t4); \ + vextracti128 $1, x3, t3##_x; \ + vextracti128 $1, x6, t2##_x; \ + filter_8bit(x2, t5, t6, t7, t4); \ + filter_8bit(x5, t5, t6, t7, t4); \ + filter_8bit(x1, t5, t6, t7, t4); \ + filter_8bit(x4, t5, t6, t7, t4); \ + \ vpxor t4##_x, t4##_x, t4##_x; \ - filter_8bit(x3, t2, t3, t7, t6); \ - filter_8bit(x6, t2, t3, t7, t6); \ \ /* AES subbytes + AES shift rows */ \ + vextracti128 $1, x2, t6##_x; \ + vextracti128 $1, x5, t5##_x; \ + vaesenclast t4##_x, x0##_x, x0##_x; \ + vaesenclast t4##_x, t0##_x, t0##_x; \ + vinserti128 $1, t0##_x, x0, x0; \ + vaesenclast t4##_x, x7##_x, x7##_x; \ + vaesenclast t4##_x, t1##_x, t1##_x; \ + vinserti128 $1, t1##_x, x7, x7; \ + vaesenclast t4##_x, x3##_x, x3##_x; \ + vaesenclast t4##_x, t3##_x, t3##_x; \ + vinserti128 $1, t3##_x, x3, x3; \ + vaesenclast t4##_x, x6##_x, x6##_x; \ + vaesenclast t4##_x, t2##_x, t2##_x; \ + vinserti128 $1, t2##_x, x6, x6; \ + vextracti128 $1, x1, t3##_x; \ + vextracti128 $1, x4, t2##_x; \ vbroadcasti128 .Lpost_tf_lo_s1, t0; \ vbroadcasti128 .Lpost_tf_hi_s1, t1; \ - vaesenclast256(t4, x0, t5); \ - vaesenclast256(t4, x7, t5); \ - vaesenclast256(t4, x1, t5); \ 
- vaesenclast256(t4, x4, t5); \ - vaesenclast256(t4, x2, t5); \ - vaesenclast256(t4, x5, t5); \ - vaesenclast256(t4, x3, t5); \ - vaesenclast256(t4, x6, t5); \ + vaesenclast t4##_x, x2##_x, x2##_x; \ + vaesenclast t4##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x2, x2; \ + vaesenclast t4##_x, x5##_x, x5##_x; \ + vaesenclast t4##_x, t5##_x, t5##_x; \ + vinserti128 $1, t5##_x, x5, x5; \ + vaesenclast t4##_x, x1##_x, x1##_x; \ + vaesenclast t4##_x, t3##_x, t3##_x; \ + vinserti128 $1, t3##_x, x1, x1; \ + vaesenclast t4##_x, x4##_x, x4##_x; \ + vaesenclast t4##_x, t2##_x, t2##_x; \ + vinserti128 $1, t2##_x, x4, x4; \ \ /* postfilter sboxes 1 and 4 */ \ vbroadcasti128 .Lpost_tf_lo_s3, t2; \ @@ -139,22 +153,12 @@ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ + vpxor t7, t7, t7; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ + vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ - vpsrldq $4, t0, t4; \ - vpsrldq $5, t0, t5; \ - vpsrldq $6, t0, t6; \ - vpsrldq $7, t0, t7; \ - vpbroadcastb t0##_x, t0; \ - vpbroadcastb t1##_x, t1; \ - vpbroadcastb t2##_x, t2; \ - vpbroadcastb t3##_x, t3; \ - vpbroadcastb t4##_x, t4; \ - vpbroadcastb t6##_x, t6; \ - vpbroadcastb t5##_x, t5; \ - vpbroadcastb t7##_x, t7; \ \ /* P-function */ \ vpxor x5, x0, x0; \ @@ -162,11 +166,21 @@ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ + vpshufb t7, t2, t2; \ + vpsrldq $4, t0, t4; \ + vpshufb t7, t3, t3; \ + vpsrldq $5, t0, t5; \ + vpshufb t7, t4, t4; \ + \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ + vpsrldq $6, t0, t6; \ + vpshufb t7, t5, t5; \ + vpshufb t7, t6, t6; \ + \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ @@ -179,12 +193,16 @@ \ /* Add key material and result to CD (x becomes new CD) */ \ \ - vpxor t7, x0, x0; \ - vpxor 4 * 32(mem_cd), x0, x0; \ - \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ + vpsrldq $7, t0, t6; \ + vpshufb t7, t0, t0; \ + vpshufb t7, t6, t7; \ + \ + vpxor t7, x0, x0; \ + vpxor 4 * 32(mem_cd), x0, x0; \ + \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ @@ -204,7 +222,7 @@ vpxor 3 * 32(mem_cd), x7, x7; /* - * Size optimization... with inlined roundsm16 binary would be over 5 times + * Size optimization... with inlined roundsm32 binary would be over 5 times * larger and would only marginally faster. 
*/ .align 8 @@ -324,13 +342,13 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpxor tt0, tt0, tt0; \ - vpbroadcastb t0##_x, t3; \ + vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t2; \ + vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t1; \ + vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t0; \ + vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ @@ -340,6 +358,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ + vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu l4, 4 * 32(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 32(l); \ @@ -354,14 +373,13 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) * rl ^= t2; \ */ \ \ - vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ - vpbroadcastb t0##_x, t3; \ + vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t2; \ + vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t1; \ + vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t0; \ + vpshufb tt0, t0, t0; \ \ vpor 4 * 32(r), t0, t0; \ vpor 5 * 32(r), t1, t1; \ @@ -373,6 +391,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) vpxor 2 * 32(r), t2, t2; \ vpxor 3 * 32(r), t3, t3; \ vmovdqu t0, 0 * 32(r); \ + vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 1 * 32(r); \ vmovdqu t2, 2 * 32(r); \ vmovdqu t3, 3 * 32(r); \ @@ -382,14 +401,13 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ - vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ - vpbroadcastb t0##_x, t3; \ + vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t2; \ + vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t1; \ + vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t0; \ + vpshufb tt0, t0, t0; \ \ vpand 0 * 32(r), t0, t0; \ vpand 1 * 32(r), t1, t1; \ @@ -403,6 +421,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) vpxor 6 * 32(r), t2, t2; \ vpxor 7 * 32(r), t3, t3; \ vmovdqu t0, 4 * 32(r); \ + vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 5 * 32(r); \ vmovdqu t2, 6 * 32(r); \ vmovdqu t3, 7 * 32(r); \ @@ -413,14 +432,13 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) * ll ^= t0; \ */ \ \ - vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ - vpbroadcastb t0##_x, t3; \ + vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t2; \ + vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t1; \ + vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ - vpbroadcastb t0##_x, t0; \ + vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S new file mode 100644 index 000000000000..35e97569d05f --- /dev/null +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -0,0 +1,643 @@ +######################################################################## +# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions +# +# Copyright (c) 2013, Intel Corporation +# +# Authors: +# Erdinc Ozturk <erdinc.ozturk@intel.com> +# Vinodh Gopal <vinodh.gopal@intel.com> +# James Guilford <james.guilford@intel.com> +# Tim Chen <tim.c.chen@linux.intel.com> +# +# This software is 
available to you under a choice of one of two +# licenses. You may choose to be licensed under the terms of the GNU +# General Public License (GPL) Version 2, available from the file +# COPYING in the main directory of this source tree, or the +# OpenIB.org BSD license below: +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# * Neither the name of the Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +######################################################################## +# Function API: +# UINT16 crc_t10dif_pcl( +# UINT16 init_crc, //initial CRC value, 16 bits +# const unsigned char *buf, //buffer pointer to calculate CRC on +# UINT64 len //buffer length in bytes (64-bit data) +# ); +# +# Reference paper titled "Fast CRC Computation for Generic +# Polynomials Using PCLMULQDQ Instruction" +# URL: http://www.intel.com/content/dam/www/public/us/en/documents +# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +# +# + +#include <linux/linkage.h> + +.text + +#define arg1 %rdi +#define arg2 %rsi +#define arg3 %rdx + +#define arg1_low32 %edi + +ENTRY(crc_t10dif_pcl) +.align 16 + + # adjust the 16-bit initial_crc value, scale it to 32 bits + shl $16, arg1_low32 + + # Allocate Stack Space + mov %rsp, %rcx + sub $16*2, %rsp + # align stack to 16 byte boundary + and $~(0x10 - 1), %rsp + + # check if smaller than 256 + cmp $256, arg3 + + # for sizes less than 128, we can't fold 64B at a time... + jl _less_than_128 + + + # load the initial crc value + movd arg1_low32, %xmm10 # initial crc + + # crc value does not need to be byte-reflected, but it needs + # to be moved to the high part of the register. + # because data will be byte-reflected and will align with + # initial crc at correct place. 
+ pslldq $12, %xmm10 + + movdqa SHUF_MASK(%rip), %xmm11 + # receive the initial 64B data, xor the initial crc value + movdqu 16*0(arg2), %xmm0 + movdqu 16*1(arg2), %xmm1 + movdqu 16*2(arg2), %xmm2 + movdqu 16*3(arg2), %xmm3 + movdqu 16*4(arg2), %xmm4 + movdqu 16*5(arg2), %xmm5 + movdqu 16*6(arg2), %xmm6 + movdqu 16*7(arg2), %xmm7 + + pshufb %xmm11, %xmm0 + # XOR the initial_crc value + pxor %xmm10, %xmm0 + pshufb %xmm11, %xmm1 + pshufb %xmm11, %xmm2 + pshufb %xmm11, %xmm3 + pshufb %xmm11, %xmm4 + pshufb %xmm11, %xmm5 + pshufb %xmm11, %xmm6 + pshufb %xmm11, %xmm7 + + movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4 + #imm value of pclmulqdq instruction + #will determine which constant to use + + ################################################################# + # we subtract 256 instead of 128 to save one instruction from the loop + sub $256, arg3 + + # at this section of the code, there is 64*x+y (0<=y<64) bytes of + # buffer. The _fold_64_B_loop will fold 64B at a time + # until we have 64+y Bytes of buffer + + + # fold 64B at a time. This section of the code folds 4 xmm + # registers in parallel +_fold_64_B_loop: + + # update the buffer pointer + add $128, arg2 # buf += 64# + + movdqu 16*0(arg2), %xmm9 + movdqu 16*1(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm0, %xmm8 + movdqa %xmm1, %xmm13 + pclmulqdq $0x0 , %xmm10, %xmm0 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0 , %xmm10, %xmm1 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm0 + xorps %xmm8 , %xmm0 + pxor %xmm12, %xmm1 + xorps %xmm13, %xmm1 + + movdqu 16*2(arg2), %xmm9 + movdqu 16*3(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm2, %xmm8 + movdqa %xmm3, %xmm13 + pclmulqdq $0x0, %xmm10, %xmm2 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0, %xmm10, %xmm3 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm2 + xorps %xmm8 , %xmm2 + pxor %xmm12, %xmm3 + xorps %xmm13, %xmm3 + + movdqu 16*4(arg2), %xmm9 + movdqu 16*5(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm4, %xmm8 + movdqa %xmm5, %xmm13 + pclmulqdq $0x0, %xmm10, %xmm4 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0, %xmm10, %xmm5 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm4 + xorps %xmm8 , %xmm4 + pxor %xmm12, %xmm5 + xorps %xmm13, %xmm5 + + movdqu 16*6(arg2), %xmm9 + movdqu 16*7(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm6 , %xmm8 + movdqa %xmm7 , %xmm13 + pclmulqdq $0x0 , %xmm10, %xmm6 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0 , %xmm10, %xmm7 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm6 + xorps %xmm8 , %xmm6 + pxor %xmm12, %xmm7 + xorps %xmm13, %xmm7 + + sub $128, arg3 + + # check if there is another 64B in the buffer to be able to fold + jge _fold_64_B_loop + ################################################################## + + + add $128, arg2 + # at this point, the buffer pointer is pointing at the last y Bytes + # of the buffer the 64B of folded data is in 4 of the xmm + # registers: xmm0, xmm1, xmm2, xmm3 + + + # fold the 8 xmm registers to 1 xmm register with different constants + + movdqa rk9(%rip), %xmm10 + movdqa %xmm0, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm0 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm0, %xmm7 + + movdqa rk11(%rip), %xmm10 + movdqa %xmm1, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm1 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm1, %xmm7 + + movdqa rk13(%rip), %xmm10 + movdqa %xmm2, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm2 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor 
%xmm8, %xmm7 + pxor %xmm2, %xmm7 + + movdqa rk15(%rip), %xmm10 + movdqa %xmm3, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm3 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm3, %xmm7 + + movdqa rk17(%rip), %xmm10 + movdqa %xmm4, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm4 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm4, %xmm7 + + movdqa rk19(%rip), %xmm10 + movdqa %xmm5, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm5 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm5, %xmm7 + + movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2 + #imm value of pclmulqdq instruction + #will determine which constant to use + movdqa %xmm6, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm6 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm6, %xmm7 + + + # instead of 64, we add 48 to the loop counter to save 1 instruction + # from the loop instead of a cmp instruction, we use the negative + # flag with the jl instruction + add $128-16, arg3 + jl _final_reduction_for_128 + + # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7 + # and the rest is in memory. We can fold 16 bytes at a time if y>=16 + # continue folding 16B at a time + +_16B_reduction_loop: + movdqa %xmm7, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm7 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + movdqu (arg2), %xmm0 + pshufb %xmm11, %xmm0 + pxor %xmm0 , %xmm7 + add $16, arg2 + sub $16, arg3 + # instead of a cmp instruction, we utilize the flags with the + # jge instruction equivalent of: cmp arg3, 16-16 + # check if there is any more 16B in the buffer to be able to fold + jge _16B_reduction_loop + + #now we have 16+z bytes left to reduce, where 0<= z < 16. + #first, we reduce the data in the xmm7 register + + +_final_reduction_for_128: + # check if any more data to fold. If not, compute the CRC of + # the final 128 bits + add $16, arg3 + je _128_done + + # here we are getting data that is less than 16 bytes. + # since we know that there was data before the pointer, we can + # offset the input pointer before the actual point, to receive + # exactly 16 bytes. after that the registers need to be adjusted. 
+_get_last_two_xmms: + movdqa %xmm7, %xmm2 + + movdqu -16(arg2, arg3), %xmm1 + pshufb %xmm11, %xmm1 + + # get rid of the extra data that was loaded before + # load the shift constant + lea pshufb_shf_table+16(%rip), %rax + sub arg3, %rax + movdqu (%rax), %xmm0 + + # shift xmm2 to the left by arg3 bytes + pshufb %xmm0, %xmm2 + + # shift xmm7 to the right by 16-arg3 bytes + pxor mask1(%rip), %xmm0 + pshufb %xmm0, %xmm7 + pblendvb %xmm2, %xmm1 #xmm0 is implicit + + # fold 16 Bytes + movdqa %xmm1, %xmm2 + movdqa %xmm7, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm7 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm2, %xmm7 + +_128_done: + # compute crc of a 128-bit value + movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10 + movdqa %xmm7, %xmm0 + + #64b fold + pclmulqdq $0x1, %xmm10, %xmm7 + pslldq $8 , %xmm0 + pxor %xmm0, %xmm7 + + #32b fold + movdqa %xmm7, %xmm0 + + pand mask2(%rip), %xmm0 + + psrldq $12, %xmm7 + pclmulqdq $0x10, %xmm10, %xmm7 + pxor %xmm0, %xmm7 + + #barrett reduction +_barrett: + movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10 + movdqa %xmm7, %xmm0 + pclmulqdq $0x01, %xmm10, %xmm7 + pslldq $4, %xmm7 + pclmulqdq $0x11, %xmm10, %xmm7 + + pslldq $4, %xmm7 + pxor %xmm0, %xmm7 + pextrd $1, %xmm7, %eax + +_cleanup: + # scale the result back to 16 bits + shr $16, %eax + mov %rcx, %rsp + ret + +######################################################################## + +.align 16 +_less_than_128: + + # check if there is enough buffer to be able to fold 16B at a time + cmp $32, arg3 + jl _less_than_32 + movdqa SHUF_MASK(%rip), %xmm11 + + # now if there is, load the constants + movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + + movd arg1_low32, %xmm0 # get the initial crc value + pslldq $12, %xmm0 # align it to its correct place + movdqu (arg2), %xmm7 # load the plaintext + pshufb %xmm11, %xmm7 # byte-reflect the plaintext + pxor %xmm0, %xmm7 + + + # update the buffer pointer + add $16, arg2 + + # update the counter. subtract 32 instead of 16 to save one + # instruction from the loop + sub $32, arg3 + + jmp _16B_reduction_loop + + +.align 16 +_less_than_32: + # mov initial crc to the return value. this is necessary for + # zero-length buffers. + mov arg1_low32, %eax + test arg3, arg3 + je _cleanup + + movdqa SHUF_MASK(%rip), %xmm11 + + movd arg1_low32, %xmm0 # get the initial crc value + pslldq $12, %xmm0 # align it to its correct place + + cmp $16, arg3 + je _exact_16_left + jl _less_than_16_left + + movdqu (arg2), %xmm7 # load the plaintext + pshufb %xmm11, %xmm7 # byte-reflect the plaintext + pxor %xmm0 , %xmm7 # xor the initial crc value + add $16, arg2 + sub $16, arg3 + movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + jmp _get_last_two_xmms + + +.align 16 +_less_than_16_left: + # use stack space to load data less than 16 bytes, zero-out + # the 16B in memory first. 
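In C terms, the staging described above is simply "copy the short tail into a zeroed 16-byte slot so full 16-byte loads stay safe"; the zero padding is compensated by a byte shift before the final reduction. A sketch with hypothetical names, not part of this patch (the assembly below performs the copy in 8/4/2/1-byte pieces rather than a memcpy):

    #include <stddef.h>
    #include <string.h>

    /* Stage a short input (0 < len < 16) for the 16-byte-wide reduction path. */
    static void stage_short_input(unsigned char block[16],
                                  const unsigned char *buf, size_t len)
    {
            memset(block, 0, 16);           /* the pxor/movdqa zeroing of (%rsp) */
            memcpy(block, buf, len);        /* the 8/4/2/1-byte copies below */
    }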
+ + pxor %xmm1, %xmm1 + mov %rsp, %r11 + movdqa %xmm1, (%r11) + + cmp $4, arg3 + jl _only_less_than_4 + + # backup the counter value + mov arg3, %r9 + cmp $8, arg3 + jl _less_than_8_left + + # load 8 Bytes + mov (arg2), %rax + mov %rax, (%r11) + add $8, %r11 + sub $8, arg3 + add $8, arg2 +_less_than_8_left: + + cmp $4, arg3 + jl _less_than_4_left + + # load 4 Bytes + mov (arg2), %eax + mov %eax, (%r11) + add $4, %r11 + sub $4, arg3 + add $4, arg2 +_less_than_4_left: + + cmp $2, arg3 + jl _less_than_2_left + + # load 2 Bytes + mov (arg2), %ax + mov %ax, (%r11) + add $2, %r11 + sub $2, arg3 + add $2, arg2 +_less_than_2_left: + cmp $1, arg3 + jl _zero_left + + # load 1 Byte + mov (arg2), %al + mov %al, (%r11) +_zero_left: + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + # shl r9, 4 + lea pshufb_shf_table+16(%rip), %rax + sub %r9, %rax + movdqu (%rax), %xmm0 + pxor mask1(%rip), %xmm0 + + pshufb %xmm0, %xmm7 + jmp _128_done + +.align 16 +_exact_16_left: + movdqu (arg2), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + jmp _128_done + +_only_less_than_4: + cmp $3, arg3 + jl _only_less_than_3 + + # load 3 Bytes + mov (arg2), %al + mov %al, (%r11) + + mov 1(arg2), %al + mov %al, 1(%r11) + + mov 2(arg2), %al + mov %al, 2(%r11) + + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + psrldq $5, %xmm7 + + jmp _barrett +_only_less_than_3: + cmp $2, arg3 + jl _only_less_than_2 + + # load 2 Bytes + mov (arg2), %al + mov %al, (%r11) + + mov 1(arg2), %al + mov %al, 1(%r11) + + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + psrldq $6, %xmm7 + + jmp _barrett +_only_less_than_2: + + # load 1 Byte + mov (arg2), %al + mov %al, (%r11) + + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + psrldq $7, %xmm7 + + jmp _barrett + +ENDPROC(crc_t10dif_pcl) + +.data + +# precomputed constants +# these constants are precomputed from the poly: +# 0x8bb70000 (0x8bb7 scaled to 32 bits) +.align 16 +# Q = 0x18BB70000 +# rk1 = 2^(32*3) mod Q << 32 +# rk2 = 2^(32*5) mod Q << 32 +# rk3 = 2^(32*15) mod Q << 32 +# rk4 = 2^(32*17) mod Q << 32 +# rk5 = 2^(32*3) mod Q << 32 +# rk6 = 2^(32*2) mod Q << 32 +# rk7 = floor(2^64/Q) +# rk8 = Q +rk1: +.quad 0x2d56000000000000 +rk2: +.quad 0x06df000000000000 +rk3: +.quad 0x9d9d000000000000 +rk4: +.quad 0x7cf5000000000000 +rk5: +.quad 0x2d56000000000000 +rk6: +.quad 0x1368000000000000 +rk7: +.quad 0x00000001f65a57f8 +rk8: +.quad 0x000000018bb70000 + +rk9: +.quad 0xceae000000000000 +rk10: +.quad 0xbfd6000000000000 +rk11: +.quad 0x1e16000000000000 +rk12: +.quad 0x713c000000000000 +rk13: +.quad 0xf7f9000000000000 +rk14: +.quad 0x80a6000000000000 +rk15: +.quad 0x044c000000000000 +rk16: +.quad 0xe658000000000000 +rk17: +.quad 0xad18000000000000 +rk18: +.quad 0xa497000000000000 +rk19: +.quad 0x6ee3000000000000 +rk20: +.quad 0xe7b5000000000000 + + + +mask1: +.octa 0x80808080808080808080808080808080 +mask2: +.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF + +SHUF_MASK: +.octa 0x000102030405060708090A0B0C0D0E0F + +pshufb_shf_table: +# use these values for shift constants for the pshufb instruction +# different alignments result in values as shown: +# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 +# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 +# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 +# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 
(16-4) / shr4 +# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 +# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 +# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 +# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 +# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 +# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 +# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 +# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 +# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 +# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 +# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +.octa 0x8f8e8d8c8b8a89888786858483828100 +.octa 0x000e0d0c0b0a09080706050403020100 diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c new file mode 100644 index 000000000000..7845d7fd54c0 --- /dev/null +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -0,0 +1,151 @@ +/* + * Cryptographic API. + * + * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions + * + * Copyright (C) 2013 Intel Corporation + * Author: Tim Chen <tim.c.chen@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/crc-t10dif.h> +#include <crypto/internal/hash.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <asm/i387.h> +#include <asm/cpufeature.h> +#include <asm/cpu_device_id.h> + +asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, + size_t len); + +struct chksum_desc_ctx { + __u16 crc; +}; + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. 
+ */ + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = 0; + + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + if (irq_fpu_usable()) { + kernel_fpu_begin(); + ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); + kernel_fpu_end(); + } else + ctx->crc = crc_t10dif_generic(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + *(__u16 *)out = ctx->crc; + return 0; +} + +static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + if (irq_fpu_usable()) { + kernel_fpu_begin(); + *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); + kernel_fpu_end(); + } else + *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, length, out); +} + +static struct shash_alg alg = { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crct10dif", + .cra_driver_name = "crct10dif-pclmul", + .cra_priority = 200, + .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static const struct x86_cpu_id crct10dif_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id); + +static int __init crct10dif_intel_mod_init(void) +{ + if (!x86_match_cpu(crct10dif_cpu_id)) + return -ENODEV; + + return crypto_register_shash(&alg); +} + +static void __exit crct10dif_intel_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crct10dif_intel_mod_init); +module_exit(crct10dif_intel_mod_fini); + +MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); +MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("crct10dif"); +MODULE_ALIAS("crct10dif-pclmul"); diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 597d4da69656..50226c4b86ed 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -187,7 +187,36 @@ static int sha256_ssse3_import(struct shash_desc *desc, const void *in) return 0; } -static struct shash_alg alg = { +static int sha224_ssse3_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; + + return 0; +} + +static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash) +{ + u8 D[SHA256_DIGEST_SIZE]; + + sha256_ssse3_final(desc, D); + + memcpy(hash, D, SHA224_DIGEST_SIZE); + memset(D, 0, SHA256_DIGEST_SIZE); + + return 0; +} + +static struct shash_alg algs[] = { { .digestsize = 
SHA256_DIGEST_SIZE, .init = sha256_ssse3_init, .update = sha256_ssse3_update, @@ -204,7 +233,24 @@ static struct shash_alg alg = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } -}; +}, { + .digestsize = SHA224_DIGEST_SIZE, + .init = sha224_ssse3_init, + .update = sha256_ssse3_update, + .final = sha224_ssse3_final, + .export = sha256_ssse3_export, + .import = sha256_ssse3_import, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-ssse3", + .cra_priority = 150, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +} }; #ifdef CONFIG_AS_AVX static bool __init avx_usable(void) @@ -227,7 +273,7 @@ static bool __init avx_usable(void) static int __init sha256_ssse3_mod_init(void) { - /* test for SSE3 first */ + /* test for SSSE3 first */ if (cpu_has_ssse3) sha256_transform_asm = sha256_transform_ssse3; @@ -254,7 +300,7 @@ static int __init sha256_ssse3_mod_init(void) else #endif pr_info("Using SSSE3 optimized SHA-256 implementation\n"); - return crypto_register_shash(&alg); + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } pr_info("Neither AVX nor SSSE3 is available/usable.\n"); @@ -263,7 +309,7 @@ static int __init sha256_ssse3_mod_init(void) static void __exit sha256_ssse3_mod_fini(void) { - crypto_unregister_shash(&alg); + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } module_init(sha256_ssse3_mod_init); @@ -273,3 +319,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); MODULE_ALIAS("sha256"); +MODULE_ALIAS("sha384"); diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 6cbd8df348d2..f30cd10293f0 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -194,7 +194,37 @@ static int sha512_ssse3_import(struct shash_desc *desc, const void *in) return 0; } -static struct shash_alg alg = { +static int sha384_ssse3_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; + + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static int sha384_ssse3_final(struct shash_desc *desc, u8 *hash) +{ + u8 D[SHA512_DIGEST_SIZE]; + + sha512_ssse3_final(desc, D); + + memcpy(hash, D, SHA384_DIGEST_SIZE); + memset(D, 0, SHA512_DIGEST_SIZE); + + return 0; +} + +static struct shash_alg algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_ssse3_init, .update = sha512_ssse3_update, @@ -211,7 +241,24 @@ static struct shash_alg alg = { .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } -}; +}, { + .digestsize = SHA384_DIGEST_SIZE, + .init = sha384_ssse3_init, + .update = sha512_ssse3_update, + .final = sha384_ssse3_final, + .export = sha512_ssse3_export, + .import = sha512_ssse3_import, + .descsize = sizeof(struct sha512_state), + .statesize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha384", + .cra_driver_name = "sha384-ssse3", + .cra_priority = 150, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +} }; #ifdef CONFIG_AS_AVX static bool __init avx_usable(void) @@ -234,7 +281,7 @@ static bool __init avx_usable(void) static int 
__init sha512_ssse3_mod_init(void) { - /* test for SSE3 first */ + /* test for SSSE3 first */ if (cpu_has_ssse3) sha512_transform_asm = sha512_transform_ssse3; @@ -261,7 +308,7 @@ static int __init sha512_ssse3_mod_init(void) else #endif pr_info("Using SSSE3 optimized SHA-512 implementation\n"); - return crypto_register_shash(&alg); + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } pr_info("Neither AVX nor SSSE3 is available/usable.\n"); @@ -270,7 +317,7 @@ static int __init sha512_ssse3_mod_init(void) static void __exit sha512_ssse3_mod_fini(void) { - crypto_unregister_shash(&alg); + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } module_init(sha512_ssse3_mod_init); @@ -280,3 +327,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated"); MODULE_ALIAS("sha512"); +MODULE_ALIAS("sha384"); diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S deleted file mode 100644 index e1a83b9cd389..000000000000 --- a/arch/x86/crypto/twofish-avx2-asm_64.S +++ /dev/null @@ -1,600 +0,0 @@ -/* - * x86_64/AVX2 assembler optimized version of Twofish - * - * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ - -#include <linux/linkage.h> -#include "glue_helper-asm-avx2.S" - -.file "twofish-avx2-asm_64.S" - -.data -.align 16 - -.Lvpshufb_mask0: -.long 0x80808000 -.long 0x80808004 -.long 0x80808008 -.long 0x8080800c - -.Lbswap128_mask: - .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.Lxts_gf128mul_and_shl1_mask_0: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 -.Lxts_gf128mul_and_shl1_mask_1: - .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 - -.text - -/* structure of crypto context */ -#define s0 0 -#define s1 1024 -#define s2 2048 -#define s3 3072 -#define w 4096 -#define k 4128 - -/* register macros */ -#define CTX %rdi - -#define RS0 CTX -#define RS1 %r8 -#define RS2 %r9 -#define RS3 %r10 -#define RK %r11 -#define RW %rax -#define RROUND %r12 -#define RROUNDd %r12d - -#define RA0 %ymm8 -#define RB0 %ymm9 -#define RC0 %ymm10 -#define RD0 %ymm11 -#define RA1 %ymm12 -#define RB1 %ymm13 -#define RC1 %ymm14 -#define RD1 %ymm15 - -/* temp regs */ -#define RX0 %ymm0 -#define RY0 %ymm1 -#define RX1 %ymm2 -#define RY1 %ymm3 -#define RT0 %ymm4 -#define RIDX %ymm5 - -#define RX0x %xmm0 -#define RY0x %xmm1 -#define RX1x %xmm2 -#define RY1x %xmm3 -#define RT0x %xmm4 - -/* vpgatherdd mask and '-1' */ -#define RNOT %ymm6 - -/* byte mask, (-1 >> 24) */ -#define RBYTE %ymm7 - -/********************************************************************** - 16-way AVX2 twofish - **********************************************************************/ -#define init_round_constants() \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpsrld $24, RNOT, RBYTE; \ - leaq k(CTX), RK; \ - leaq w(CTX), RW; \ - leaq s1(CTX), RS1; \ - leaq s2(CTX), RS2; \ - leaq s3(CTX), RS3; \ - -#define g16(ab, rs0, rs1, rs2, rs3, xy) \ - vpand RBYTE, ab ## 0, RIDX; \ - vpgatherdd RNOT, (rs0, RIDX, 4), xy ## 0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - \ - vpand RBYTE, ab ## 1, RIDX; \ - vpgatherdd RNOT, (rs0, RIDX, 4), xy ## 1; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - \ - vpsrld $8, ab ## 0, RIDX; \ - vpand RBYTE, RIDX, RIDX; \ - vpgatherdd RNOT, (rs1, RIDX, 4), RT0; \ - 
vpcmpeqd RNOT, RNOT, RNOT; \ - vpxor RT0, xy ## 0, xy ## 0; \ - \ - vpsrld $8, ab ## 1, RIDX; \ - vpand RBYTE, RIDX, RIDX; \ - vpgatherdd RNOT, (rs1, RIDX, 4), RT0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpxor RT0, xy ## 1, xy ## 1; \ - \ - vpsrld $16, ab ## 0, RIDX; \ - vpand RBYTE, RIDX, RIDX; \ - vpgatherdd RNOT, (rs2, RIDX, 4), RT0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpxor RT0, xy ## 0, xy ## 0; \ - \ - vpsrld $16, ab ## 1, RIDX; \ - vpand RBYTE, RIDX, RIDX; \ - vpgatherdd RNOT, (rs2, RIDX, 4), RT0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpxor RT0, xy ## 1, xy ## 1; \ - \ - vpsrld $24, ab ## 0, RIDX; \ - vpgatherdd RNOT, (rs3, RIDX, 4), RT0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpxor RT0, xy ## 0, xy ## 0; \ - \ - vpsrld $24, ab ## 1, RIDX; \ - vpgatherdd RNOT, (rs3, RIDX, 4), RT0; \ - vpcmpeqd RNOT, RNOT, RNOT; \ - vpxor RT0, xy ## 1, xy ## 1; - -#define g1_16(a, x) \ - g16(a, RS0, RS1, RS2, RS3, x); - -#define g2_16(b, y) \ - g16(b, RS1, RS2, RS3, RS0, y); - -#define encrypt_round_end16(a, b, c, d, nk) \ - vpaddd RY0, RX0, RX0; \ - vpaddd RX0, RY0, RY0; \ - vpbroadcastd nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RX0, RX0; \ - vpbroadcastd 4+nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RY0, RY0; \ - \ - vpxor RY0, d ## 0, d ## 0; \ - \ - vpxor RX0, c ## 0, c ## 0; \ - vpsrld $1, c ## 0, RT0; \ - vpslld $31, c ## 0, c ## 0; \ - vpor RT0, c ## 0, c ## 0; \ - \ - vpaddd RY1, RX1, RX1; \ - vpaddd RX1, RY1, RY1; \ - vpbroadcastd nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RX1, RX1; \ - vpbroadcastd 4+nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RY1, RY1; \ - \ - vpxor RY1, d ## 1, d ## 1; \ - \ - vpxor RX1, c ## 1, c ## 1; \ - vpsrld $1, c ## 1, RT0; \ - vpslld $31, c ## 1, c ## 1; \ - vpor RT0, c ## 1, c ## 1; \ - -#define encrypt_round16(a, b, c, d, nk) \ - g2_16(b, RY); \ - \ - vpslld $1, b ## 0, RT0; \ - vpsrld $31, b ## 0, b ## 0; \ - vpor RT0, b ## 0, b ## 0; \ - \ - vpslld $1, b ## 1, RT0; \ - vpsrld $31, b ## 1, b ## 1; \ - vpor RT0, b ## 1, b ## 1; \ - \ - g1_16(a, RX); \ - \ - encrypt_round_end16(a, b, c, d, nk); - -#define encrypt_round_first16(a, b, c, d, nk) \ - vpslld $1, d ## 0, RT0; \ - vpsrld $31, d ## 0, d ## 0; \ - vpor RT0, d ## 0, d ## 0; \ - \ - vpslld $1, d ## 1, RT0; \ - vpsrld $31, d ## 1, d ## 1; \ - vpor RT0, d ## 1, d ## 1; \ - \ - encrypt_round16(a, b, c, d, nk); - -#define encrypt_round_last16(a, b, c, d, nk) \ - g2_16(b, RY); \ - \ - g1_16(a, RX); \ - \ - encrypt_round_end16(a, b, c, d, nk); - -#define decrypt_round_end16(a, b, c, d, nk) \ - vpaddd RY0, RX0, RX0; \ - vpaddd RX0, RY0, RY0; \ - vpbroadcastd nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RX0, RX0; \ - vpbroadcastd 4+nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RY0, RY0; \ - \ - vpxor RX0, c ## 0, c ## 0; \ - \ - vpxor RY0, d ## 0, d ## 0; \ - vpsrld $1, d ## 0, RT0; \ - vpslld $31, d ## 0, d ## 0; \ - vpor RT0, d ## 0, d ## 0; \ - \ - vpaddd RY1, RX1, RX1; \ - vpaddd RX1, RY1, RY1; \ - vpbroadcastd nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RX1, RX1; \ - vpbroadcastd 4+nk(RK,RROUND,8), RT0; \ - vpaddd RT0, RY1, RY1; \ - \ - vpxor RX1, c ## 1, c ## 1; \ - \ - vpxor RY1, d ## 1, d ## 1; \ - vpsrld $1, d ## 1, RT0; \ - vpslld $31, d ## 1, d ## 1; \ - vpor RT0, d ## 1, d ## 1; - -#define decrypt_round16(a, b, c, d, nk) \ - g1_16(a, RX); \ - \ - vpslld $1, a ## 0, RT0; \ - vpsrld $31, a ## 0, a ## 0; \ - vpor RT0, a ## 0, a ## 0; \ - \ - vpslld $1, a ## 1, RT0; \ - vpsrld $31, a ## 1, a ## 1; \ - vpor RT0, a ## 1, a ## 1; \ - \ - g2_16(b, RY); \ - \ - decrypt_round_end16(a, b, c, d, nk); - -#define decrypt_round_first16(a, b, c, d, nk) \ - vpslld $1, c ## 
0, RT0; \ - vpsrld $31, c ## 0, c ## 0; \ - vpor RT0, c ## 0, c ## 0; \ - \ - vpslld $1, c ## 1, RT0; \ - vpsrld $31, c ## 1, c ## 1; \ - vpor RT0, c ## 1, c ## 1; \ - \ - decrypt_round16(a, b, c, d, nk) - -#define decrypt_round_last16(a, b, c, d, nk) \ - g1_16(a, RX); \ - \ - g2_16(b, RY); \ - \ - decrypt_round_end16(a, b, c, d, nk); - -#define encrypt_cycle16() \ - encrypt_round16(RA, RB, RC, RD, 0); \ - encrypt_round16(RC, RD, RA, RB, 8); - -#define encrypt_cycle_first16() \ - encrypt_round_first16(RA, RB, RC, RD, 0); \ - encrypt_round16(RC, RD, RA, RB, 8); - -#define encrypt_cycle_last16() \ - encrypt_round16(RA, RB, RC, RD, 0); \ - encrypt_round_last16(RC, RD, RA, RB, 8); - -#define decrypt_cycle16(n) \ - decrypt_round16(RC, RD, RA, RB, 8); \ - decrypt_round16(RA, RB, RC, RD, 0); - -#define decrypt_cycle_first16(n) \ - decrypt_round_first16(RC, RD, RA, RB, 8); \ - decrypt_round16(RA, RB, RC, RD, 0); - -#define decrypt_cycle_last16(n) \ - decrypt_round16(RC, RD, RA, RB, 8); \ - decrypt_round_last16(RA, RB, RC, RD, 0); - -#define transpose_4x4(x0,x1,x2,x3,t1,t2) \ - vpunpckhdq x1, x0, t2; \ - vpunpckldq x1, x0, x0; \ - \ - vpunpckldq x3, x2, t1; \ - vpunpckhdq x3, x2, x2; \ - \ - vpunpckhqdq t1, x0, x1; \ - vpunpcklqdq t1, x0, x0; \ - \ - vpunpckhqdq x2, t2, x3; \ - vpunpcklqdq x2, t2, x2; - -#define read_blocks8(offs,a,b,c,d) \ - transpose_4x4(a, b, c, d, RX0, RY0); - -#define write_blocks8(offs,a,b,c,d) \ - transpose_4x4(a, b, c, d, RX0, RY0); - -#define inpack_enc8(a,b,c,d) \ - vpbroadcastd 4*0(RW), RT0; \ - vpxor RT0, a, a; \ - \ - vpbroadcastd 4*1(RW), RT0; \ - vpxor RT0, b, b; \ - \ - vpbroadcastd 4*2(RW), RT0; \ - vpxor RT0, c, c; \ - \ - vpbroadcastd 4*3(RW), RT0; \ - vpxor RT0, d, d; - -#define outunpack_enc8(a,b,c,d) \ - vpbroadcastd 4*4(RW), RX0; \ - vpbroadcastd 4*5(RW), RY0; \ - vpxor RX0, c, RX0; \ - vpxor RY0, d, RY0; \ - \ - vpbroadcastd 4*6(RW), RT0; \ - vpxor RT0, a, c; \ - vpbroadcastd 4*7(RW), RT0; \ - vpxor RT0, b, d; \ - \ - vmovdqa RX0, a; \ - vmovdqa RY0, b; - -#define inpack_dec8(a,b,c,d) \ - vpbroadcastd 4*4(RW), RX0; \ - vpbroadcastd 4*5(RW), RY0; \ - vpxor RX0, a, RX0; \ - vpxor RY0, b, RY0; \ - \ - vpbroadcastd 4*6(RW), RT0; \ - vpxor RT0, c, a; \ - vpbroadcastd 4*7(RW), RT0; \ - vpxor RT0, d, b; \ - \ - vmovdqa RX0, c; \ - vmovdqa RY0, d; - -#define outunpack_dec8(a,b,c,d) \ - vpbroadcastd 4*0(RW), RT0; \ - vpxor RT0, a, a; \ - \ - vpbroadcastd 4*1(RW), RT0; \ - vpxor RT0, b, b; \ - \ - vpbroadcastd 4*2(RW), RT0; \ - vpxor RT0, c, c; \ - \ - vpbroadcastd 4*3(RW), RT0; \ - vpxor RT0, d, d; - -#define read_blocks16(a,b,c,d) \ - read_blocks8(0, a ## 0, b ## 0, c ## 0, d ## 0); \ - read_blocks8(8, a ## 1, b ## 1, c ## 1, d ## 1); - -#define write_blocks16(a,b,c,d) \ - write_blocks8(0, a ## 0, b ## 0, c ## 0, d ## 0); \ - write_blocks8(8, a ## 1, b ## 1, c ## 1, d ## 1); - -#define xor_blocks16(a,b,c,d) \ - xor_blocks8(0, a ## 0, b ## 0, c ## 0, d ## 0); \ - xor_blocks8(8, a ## 1, b ## 1, c ## 1, d ## 1); - -#define inpack_enc16(a,b,c,d) \ - inpack_enc8(a ## 0, b ## 0, c ## 0, d ## 0); \ - inpack_enc8(a ## 1, b ## 1, c ## 1, d ## 1); - -#define outunpack_enc16(a,b,c,d) \ - outunpack_enc8(a ## 0, b ## 0, c ## 0, d ## 0); \ - outunpack_enc8(a ## 1, b ## 1, c ## 1, d ## 1); - -#define inpack_dec16(a,b,c,d) \ - inpack_dec8(a ## 0, b ## 0, c ## 0, d ## 0); \ - inpack_dec8(a ## 1, b ## 1, c ## 1, d ## 1); - -#define outunpack_dec16(a,b,c,d) \ - outunpack_dec8(a ## 0, b ## 0, c ## 0, d ## 0); \ - outunpack_dec8(a ## 1, b ## 1, c ## 1, d ## 1); - -.align 8 
-__twofish_enc_blk16: - /* input: - * %rdi: ctx, CTX - * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: plaintext - * output: - * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: ciphertext - */ - init_round_constants(); - - read_blocks16(RA, RB, RC, RD); - inpack_enc16(RA, RB, RC, RD); - - xorl RROUNDd, RROUNDd; - encrypt_cycle_first16(); - movl $2, RROUNDd; - -.align 4 -.L__enc_loop: - encrypt_cycle16(); - - addl $2, RROUNDd; - cmpl $14, RROUNDd; - jne .L__enc_loop; - - encrypt_cycle_last16(); - - outunpack_enc16(RA, RB, RC, RD); - write_blocks16(RA, RB, RC, RD); - - ret; -ENDPROC(__twofish_enc_blk16) - -.align 8 -__twofish_dec_blk16: - /* input: - * %rdi: ctx, CTX - * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: ciphertext - * output: - * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: plaintext - */ - init_round_constants(); - - read_blocks16(RA, RB, RC, RD); - inpack_dec16(RA, RB, RC, RD); - - movl $14, RROUNDd; - decrypt_cycle_first16(); - movl $12, RROUNDd; - -.align 4 -.L__dec_loop: - decrypt_cycle16(); - - addl $-2, RROUNDd; - jnz .L__dec_loop; - - decrypt_cycle_last16(); - - outunpack_dec16(RA, RB, RC, RD); - write_blocks16(RA, RB, RC, RD); - - ret; -ENDPROC(__twofish_dec_blk16) - -ENTRY(twofish_ecb_enc_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - */ - - vzeroupper; - pushq %r12; - - load_16way(%rdx, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - call __twofish_enc_blk16; - - store_16way(%rsi, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - popq %r12; - vzeroupper; - - ret; -ENDPROC(twofish_ecb_enc_16way) - -ENTRY(twofish_ecb_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - */ - - vzeroupper; - pushq %r12; - - load_16way(%rdx, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - call __twofish_dec_blk16; - - store_16way(%rsi, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - popq %r12; - vzeroupper; - - ret; -ENDPROC(twofish_ecb_dec_16way) - -ENTRY(twofish_cbc_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - */ - - vzeroupper; - pushq %r12; - - load_16way(%rdx, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - call __twofish_dec_blk16; - - store_cbc_16way(%rdx, %rsi, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1, - RX0); - - popq %r12; - vzeroupper; - - ret; -ENDPROC(twofish_cbc_dec_16way) - -ENTRY(twofish_ctr_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (little endian, 128bit) - */ - - vzeroupper; - pushq %r12; - - load_ctr_16way(%rcx, .Lbswap128_mask, RA0, RB0, RC0, RD0, RA1, RB1, RC1, - RD1, RX0, RX0x, RX1, RX1x, RY0, RY0x, RY1, RY1x, RNOT, - RBYTE); - - call __twofish_enc_blk16; - - store_ctr_16way(%rdx, %rsi, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - popq %r12; - vzeroupper; - - ret; -ENDPROC(twofish_ctr_16way) - -.align 8 -twofish_xts_crypt_16way: - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - * %r8: pointer to __twofish_enc_blk16 or __twofish_dec_blk16 - */ - - vzeroupper; - pushq %r12; - - load_xts_16way(%rcx, %rdx, %rsi, RA0, RB0, RC0, RD0, RA1, RB1, RC1, - RD1, RX0, RX0x, RX1, RX1x, RY0, RY0x, RY1, RY1x, RNOT, - .Lxts_gf128mul_and_shl1_mask_0, - .Lxts_gf128mul_and_shl1_mask_1); - - call *%r8; - - store_xts_16way(%rsi, RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1); - - popq %r12; - vzeroupper; - - ret; -ENDPROC(twofish_xts_crypt_16way) - -ENTRY(twofish_xts_enc_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - leaq __twofish_enc_blk16, %r8; - jmp 
twofish_xts_crypt_16way; -ENDPROC(twofish_xts_enc_16way) - -ENTRY(twofish_xts_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - leaq __twofish_dec_blk16, %r8; - jmp twofish_xts_crypt_16way; -ENDPROC(twofish_xts_dec_16way) diff --git a/arch/x86/crypto/twofish_avx2_glue.c b/arch/x86/crypto/twofish_avx2_glue.c deleted file mode 100644 index ce33b5be64ee..000000000000 --- a/arch/x86/crypto/twofish_avx2_glue.c +++ /dev/null @@ -1,584 +0,0 @@ -/* - * Glue Code for x86_64/AVX2 assembler optimized version of Twofish - * - * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> -#include <linux/err.h> -#include <crypto/algapi.h> -#include <crypto/ctr.h> -#include <crypto/twofish.h> -#include <crypto/lrw.h> -#include <crypto/xts.h> -#include <asm/xcr.h> -#include <asm/xsave.h> -#include <asm/crypto/twofish.h> -#include <asm/crypto/ablk_helper.h> -#include <asm/crypto/glue_helper.h> -#include <crypto/scatterwalk.h> - -#define TF_AVX2_PARALLEL_BLOCKS 16 - -/* 16-way AVX2 parallel cipher functions */ -asmlinkage void twofish_ecb_enc_16way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_ecb_dec_16way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src); - -asmlinkage void twofish_ctr_16way(void *ctx, u128 *dst, const u128 *src, - le128 *iv); - -asmlinkage void twofish_xts_enc_16way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void twofish_xts_dec_16way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); - -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src) -{ - __twofish_enc_blk_3way(ctx, dst, src, false); -} - -static const struct common_glue_ctx twofish_enc = { - .num_funcs = 4, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_16way) } - }, { - .num_blocks = 8, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) } - }, { - .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } - }, { - .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } - } } -}; - -static const struct common_glue_ctx twofish_ctr = { - .num_funcs = 4, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_16way) } - }, { - .num_blocks = 8, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) } - }, { - .num_blocks = 3, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } - }, { - .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } - } } -}; - -static const struct common_glue_ctx twofish_enc_xts = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_16way) } - }, { - .num_blocks = 8, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) } - }, { - .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) } - } } -}; - -static const struct common_glue_ctx twofish_dec = { - .num_funcs = 4, - .fpu_blocks_limit = 8, - 
- .funcs = { { - .num_blocks = 16, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_16way) } - }, { - .num_blocks = 8, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) } - }, { - .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } - }, { - .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } - } } -}; - -static const struct common_glue_ctx twofish_dec_cbc = { - .num_funcs = 4, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_16way) } - }, { - .num_blocks = 8, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) } - }, { - .num_blocks = 3, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } - }, { - .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } - } } -}; - -static const struct common_glue_ctx twofish_dec_xts = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_16way) } - }, { - .num_blocks = 8, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) } - }, { - .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) } - } } -}; - -static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes); -} - -static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes); -} - -static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc, - dst, src, nbytes); -} - -static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src, - nbytes); -} - -static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes); -} - -static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes) -{ - /* since reusing AVX functions, starts using FPU at 8 parallel blocks */ - return glue_fpu_begin(TF_BLOCK_SIZE, 8, NULL, fpu_enabled, nbytes); -} - -static inline void twofish_fpu_end(bool fpu_enabled) -{ - glue_fpu_end(fpu_enabled); -} - -struct crypt_priv { - struct twofish_ctx *ctx; - bool fpu_enabled; -}; - -static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) -{ - const unsigned int bsize = TF_BLOCK_SIZE; - struct crypt_priv *ctx = priv; - int i; - - ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes); - - while (nbytes >= TF_AVX2_PARALLEL_BLOCKS * bsize) { - twofish_ecb_enc_16way(ctx->ctx, srcdst, srcdst); - srcdst += bsize * TF_AVX2_PARALLEL_BLOCKS; - nbytes -= bsize * TF_AVX2_PARALLEL_BLOCKS; - } - - while (nbytes >= 8 * bsize) { - twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst); - srcdst += bsize * 8; - nbytes -= bsize * 8; - } - - while (nbytes >= 3 * bsize) { - twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst); - srcdst += bsize * 3; - nbytes -= bsize * 3; - } - - for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) - twofish_enc_blk(ctx->ctx, srcdst, srcdst); -} - -static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) -{ - const 
unsigned int bsize = TF_BLOCK_SIZE; - struct crypt_priv *ctx = priv; - int i; - - ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes); - - while (nbytes >= TF_AVX2_PARALLEL_BLOCKS * bsize) { - twofish_ecb_dec_16way(ctx->ctx, srcdst, srcdst); - srcdst += bsize * TF_AVX2_PARALLEL_BLOCKS; - nbytes -= bsize * TF_AVX2_PARALLEL_BLOCKS; - } - - while (nbytes >= 8 * bsize) { - twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst); - srcdst += bsize * 8; - nbytes -= bsize * 8; - } - - while (nbytes >= 3 * bsize) { - twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst); - srcdst += bsize * 3; - nbytes -= bsize * 3; - } - - for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) - twofish_dec_blk(ctx->ctx, srcdst, srcdst); -} - -static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - be128 buf[TF_AVX2_PARALLEL_BLOCKS]; - struct crypt_priv crypt_ctx = { - .ctx = &ctx->twofish_ctx, - .fpu_enabled = false, - }; - struct lrw_crypt_req req = { - .tbuf = buf, - .tbuflen = sizeof(buf), - - .table_ctx = &ctx->lrw_table, - .crypt_ctx = &crypt_ctx, - .crypt_fn = encrypt_callback, - }; - int ret; - - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - ret = lrw_crypt(desc, dst, src, nbytes, &req); - twofish_fpu_end(crypt_ctx.fpu_enabled); - - return ret; -} - -static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - be128 buf[TF_AVX2_PARALLEL_BLOCKS]; - struct crypt_priv crypt_ctx = { - .ctx = &ctx->twofish_ctx, - .fpu_enabled = false, - }; - struct lrw_crypt_req req = { - .tbuf = buf, - .tbuflen = sizeof(buf), - - .table_ctx = &ctx->lrw_table, - .crypt_ctx = &crypt_ctx, - .crypt_fn = decrypt_callback, - }; - int ret; - - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - ret = lrw_crypt(desc, dst, src, nbytes, &req); - twofish_fpu_end(crypt_ctx.fpu_enabled); - - return ret; -} - -static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - - return glue_xts_crypt_128bit(&twofish_enc_xts, desc, dst, src, nbytes, - XTS_TWEAK_CAST(twofish_enc_blk), - &ctx->tweak_ctx, &ctx->crypt_ctx); -} - -static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) -{ - struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - - return glue_xts_crypt_128bit(&twofish_dec_xts, desc, dst, src, nbytes, - XTS_TWEAK_CAST(twofish_enc_blk), - &ctx->tweak_ctx, &ctx->crypt_ctx); -} - -static struct crypto_alg tf_algs[10] = { { - .cra_name = "__ecb-twofish-avx2", - .cra_driver_name = "__driver-ecb-twofish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct twofish_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .setkey = twofish_setkey, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, - }, - }, -}, { - .cra_name = "__cbc-twofish-avx2", - .cra_driver_name = "__driver-cbc-twofish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct twofish_ctx), - .cra_alignmask = 0, - 
.cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .setkey = twofish_setkey, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - }, - }, -}, { - .cra_name = "__ctr-twofish-avx2", - .cra_driver_name = "__driver-ctr-twofish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct twofish_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = twofish_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, - }, -}, { - .cra_name = "__lrw-twofish-avx2", - .cra_driver_name = "__driver-lrw-twofish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct twofish_lrw_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_exit = lrw_twofish_exit_tfm, - .cra_u = { - .blkcipher = { - .min_keysize = TF_MIN_KEY_SIZE + - TF_BLOCK_SIZE, - .max_keysize = TF_MAX_KEY_SIZE + - TF_BLOCK_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = lrw_twofish_setkey, - .encrypt = lrw_encrypt, - .decrypt = lrw_decrypt, - }, - }, -}, { - .cra_name = "__xts-twofish-avx2", - .cra_driver_name = "__driver-xts-twofish-avx2", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct twofish_xts_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = TF_MIN_KEY_SIZE * 2, - .max_keysize = TF_MAX_KEY_SIZE * 2, - .ivsize = TF_BLOCK_SIZE, - .setkey = xts_twofish_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, - }, - }, -}, { - .cra_name = "ecb(twofish)", - .cra_driver_name = "ecb-twofish-avx2", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .setkey = ablk_set_key, - .encrypt = ablk_encrypt, - .decrypt = ablk_decrypt, - }, - }, -}, { - .cra_name = "cbc(twofish)", - .cra_driver_name = "cbc-twofish-avx2", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = ablk_set_key, - .encrypt = __ablk_encrypt, - .decrypt = ablk_decrypt, - }, - }, -}, { - .cra_name = "ctr(twofish)", - .cra_driver_name = "ctr-twofish-avx2", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = 
TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = ablk_set_key, - .encrypt = ablk_encrypt, - .decrypt = ablk_encrypt, - .geniv = "chainiv", - }, - }, -}, { - .cra_name = "lrw(twofish)", - .cra_driver_name = "lrw-twofish-avx2", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = TF_MIN_KEY_SIZE + - TF_BLOCK_SIZE, - .max_keysize = TF_MAX_KEY_SIZE + - TF_BLOCK_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = ablk_set_key, - .encrypt = ablk_encrypt, - .decrypt = ablk_decrypt, - }, - }, -}, { - .cra_name = "xts(twofish)", - .cra_driver_name = "xts-twofish-avx2", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct async_helper_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = ablk_init, - .cra_exit = ablk_exit, - .cra_u = { - .ablkcipher = { - .min_keysize = TF_MIN_KEY_SIZE * 2, - .max_keysize = TF_MAX_KEY_SIZE * 2, - .ivsize = TF_BLOCK_SIZE, - .setkey = ablk_set_key, - .encrypt = ablk_encrypt, - .decrypt = ablk_decrypt, - }, - }, -} }; - -static int __init init(void) -{ - u64 xcr0; - - if (!cpu_has_avx2 || !cpu_has_osxsave) { - pr_info("AVX2 instructions are not detected.\n"); - return -ENODEV; - } - - xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); - if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { - pr_info("AVX2 detected but unusable.\n"); - return -ENODEV; - } - - return crypto_register_algs(tf_algs, ARRAY_SIZE(tf_algs)); -} - -static void __exit fini(void) -{ - crypto_unregister_algs(tf_algs, ARRAY_SIZE(tf_algs)); -} - -module_init(init); -module_exit(fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX2 optimized"); -MODULE_ALIAS("twofish"); -MODULE_ALIAS("twofish-asm"); diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 2047a562f6b3..a62ba541884e 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -50,26 +50,18 @@ /* 8-way parallel cipher functions */ asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, const u8 *src); -EXPORT_SYMBOL_GPL(twofish_ecb_enc_8way); - asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, const u8 *src); -EXPORT_SYMBOL_GPL(twofish_ecb_dec_8way); asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst, const u8 *src); -EXPORT_SYMBOL_GPL(twofish_cbc_dec_8way); - asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(twofish_ctr_8way); asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(twofish_xts_enc_8way); asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(twofish_xts_dec_8way); static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, const u8 *src) @@ -77,19 +69,17 @@ static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, __twofish_enc_blk_3way(ctx, dst, src, false); } -void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void twofish_xts_enc(void 
*ctx, u128 *dst, const u128 *src, le128 *iv) { glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(twofish_enc_blk)); } -EXPORT_SYMBOL_GPL(twofish_xts_enc); -void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) { glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(twofish_dec_blk)); } -EXPORT_SYMBOL_GPL(twofish_xts_dec); static const struct common_glue_ctx twofish_enc = { diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 52ff81cce008..bae3aba95b15 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm) (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); - current->mm->free_area_cache = TASK_UNMAPPED_BASE; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index b31bf97775fc..2dfac58f3b11 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -111,7 +111,7 @@ static inline void acpi_disable_pci(void) } /* Low-level suspend routine. */ -extern int acpi_suspend_lowlevel(void); +extern int (*acpi_suspend_lowlevel)(void); /* Physical address to resume after wakeup */ #define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start)) diff --git a/arch/x86/include/asm/crypto/blowfish.h b/arch/x86/include/asm/crypto/blowfish.h deleted file mode 100644 index f097b2face10..000000000000 --- a/arch/x86/include/asm/crypto/blowfish.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef ASM_X86_BLOWFISH_H -#define ASM_X86_BLOWFISH_H - -#include <linux/crypto.h> -#include <crypto/blowfish.h> - -#define BF_PARALLEL_BLOCKS 4 - -/* regular block cipher functions */ -asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src, - bool xor); -asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src); - -/* 4-way parallel cipher functions */ -asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, - const u8 *src, bool xor); -asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst, - const u8 *src); - -static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src) -{ - __blowfish_enc_blk(ctx, dst, src, false); -} - -static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst, - const u8 *src) -{ - __blowfish_enc_blk(ctx, dst, src, true); -} - -static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, - const u8 *src) -{ - __blowfish_enc_blk_4way(ctx, dst, src, false); -} - -static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst, - const u8 *src) -{ - __blowfish_enc_blk_4way(ctx, dst, src, true); -} - -#endif diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h index e655c6029b45..878c51ceebb5 100644 --- a/arch/x86/include/asm/crypto/twofish.h +++ b/arch/x86/include/asm/crypto/twofish.h @@ -28,20 +28,6 @@ asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, const u8 *src); -/* 8-way parallel cipher functions */ -asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_cbc_dec_8way(struct 
twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); - /* helpers from twofish_x86_64-3way module */ extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, @@ -57,8 +43,4 @@ extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm); extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); -/* helpers from twofish-avx module */ -extern void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); -extern void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); - #endif /* ASM_X86_TWOFISH_H */ diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h index 75ce3f47d204..77a99ac06d00 100644 --- a/arch/x86/include/asm/emergency-restart.h +++ b/arch/x86/include/asm/emergency-restart.h @@ -1,18 +1,6 @@ #ifndef _ASM_X86_EMERGENCY_RESTART_H #define _ASM_X86_EMERGENCY_RESTART_H -enum reboot_type { - BOOT_TRIPLE = 't', - BOOT_KBD = 'k', - BOOT_BIOS = 'b', - BOOT_ACPI = 'a', - BOOT_EFI = 'e', - BOOT_CF9 = 'p', - BOOT_CF9_COND = 'q', -}; - -extern enum reboot_type reboot_type; - extern void machine_emergency_restart(void); #endif /* _ASM_X86_EMERGENCY_RESTART_H */ diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index d8e8eefbe24c..34f69cb9350a 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -345,4 +345,11 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, #define IO_SPACE_LIMIT 0xffff +#ifdef CONFIG_MTRR +extern int __must_check arch_phys_wc_add(unsigned long base, + unsigned long size); +extern void arch_phys_wc_del(int handle); +#define arch_phys_wc_add arch_phys_wc_add +#endif + #endif /* _ASM_X86_IO_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f87f7fcefa0a..c0efd16bdfa1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -286,6 +286,7 @@ struct kvm_mmu { u64 *pae_root; u64 *lm_root; u64 rsvd_bits_mask[2][4]; + u64 bad_mt_xwr; /* * Bitmap: bit set = last pte in walk @@ -323,6 +324,7 @@ struct kvm_pmu { u64 global_ovf_ctrl; u64 counter_bitmask[2]; u64 global_ctrl_mask; + u64 reserved_bits; u8 version; struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; @@ -511,6 +513,9 @@ struct kvm_vcpu_arch { * instruction. 
*/ bool write_fault_to_shadow_pgtable; + + /* set at EPT violation at this point */ + unsigned long exit_qualification; }; struct kvm_lpage_info { @@ -802,8 +807,8 @@ extern u32 kvm_min_guest_tsc_khz; extern u32 kvm_max_guest_tsc_khz; enum emulation_result { - EMULATE_DONE, /* no further processing */ - EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ + EMULATE_DONE, /* no further processing */ + EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */ EMULATE_FAIL, /* can't emulate this instruction */ }; diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h index d354fb781c57..a55c7efcc4ed 100644 --- a/arch/x86/include/asm/mc146818rtc.h +++ b/arch/x86/include/asm/mc146818rtc.h @@ -95,8 +95,8 @@ static inline unsigned char current_lock_cmos_reg(void) unsigned char rtc_cmos_read(unsigned char addr); void rtc_cmos_write(unsigned char val, unsigned char addr); -extern int mach_set_rtc_mmss(unsigned long nowtime); -extern unsigned long mach_get_cmos_time(void); +extern int mach_set_rtc_mmss(const struct timespec *now); +extern void mach_get_cmos_time(struct timespec *now); #define RTC_IRQ 8 diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 6b52980c29c1..29e3093bbd21 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -214,6 +214,13 @@ void mce_log_therm_throt_event(__u64 status); /* Interrupt Handler for core thermal thresholds */ extern int (*platform_thermal_notify)(__u64 msr_val); +/* Interrupt Handler for package thermal thresholds */ +extern int (*platform_thermal_package_notify)(__u64 msr_val); + +/* Callback support of rate control, return true, if + * callback has rate control */ +extern bool (*platform_thermal_package_rate_control)(void); + #ifdef CONFIG_X86_THERMAL_VECTOR extern void mcheck_intel_therm_init(void); #else diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h index 73668abdbedf..1e69a75412a4 100644 --- a/arch/x86/include/asm/mrst-vrtc.h +++ b/arch/x86/include/asm/mrst-vrtc.h @@ -3,7 +3,7 @@ extern unsigned char vrtc_cmos_read(unsigned char reg); extern void vrtc_cmos_write(unsigned char val, unsigned char reg); -extern unsigned long vrtc_get_time(void); -extern int vrtc_set_mmss(unsigned long nowtime); +extern void vrtc_get_time(struct timespec *now); +extern int vrtc_set_mmss(const struct timespec *now); #endif diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index e235582f9930..f768f6298419 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h @@ -26,7 +26,10 @@ #include <uapi/asm/mtrr.h> -/* The following functions are for use by other drivers */ +/* + * The following functions are for use by other drivers that cannot use + * arch_phys_wc_add and arch_phys_wc_del. 
+ */ # ifdef CONFIG_MTRR extern u8 mtrr_type_lookup(u64 addr, u64 end); extern void mtrr_save_fixed_ranges(void *); @@ -45,6 +48,7 @@ extern void mtrr_aps_init(void); extern void mtrr_bp_restore(void); extern int mtrr_trim_uncached_memory(unsigned long end_pfn); extern int amd_special_default_mtrr(void); +extern int phys_wc_to_mtrr_index(int handle); # else static inline u8 mtrr_type_lookup(u64 addr, u64 end) { @@ -80,6 +84,10 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) { } +static inline int phys_wc_to_mtrr_index(int handle) +{ + return -1; +} #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5b0818bc8963..7dc305a46058 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -207,7 +207,7 @@ static inline pte_t pte_mkexec(pte_t pte) static inline pte_t pte_mkdirty(pte_t pte) { - return pte_set_flags(pte, _PAGE_DIRTY); + return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); } static inline pte_t pte_mkyoung(pte_t pte) @@ -271,7 +271,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd) static inline pmd_t pmd_mkdirty(pmd_t pmd) { - return pmd_set_flags(pmd, _PAGE_DIRTY); + return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); } static inline pmd_t pmd_mkhuge(pmd_t pmd) @@ -294,6 +294,26 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd) return pmd_clear_flags(pmd, _PAGE_PRESENT); } +static inline int pte_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & _PAGE_SOFT_DIRTY; +} + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; +} + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, _PAGE_SOFT_DIRTY); +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); +} + /* * Mask out unsupported bits in a present pgprot. Non-present pgprots * can use those bits for other purposes, so leave them be. diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index e6423002c10b..c98ac63aae48 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -55,6 +55,18 @@ #define _PAGE_HIDDEN (_AT(pteval_t, 0)) #endif +/* + * The same hidden bit is used by kmemcheck, but since kmemcheck + * works on kernel pages while soft-dirty engine on user space, + * they do not conflict with each other. 
+ */ + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) +#else +#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) +#endif + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) #else diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 109a9dd5d454..be8269b00e2a 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -93,7 +93,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, struct pvclock_vsyscall_time_info { struct pvclock_vcpu_time_info pvti; - u32 migrate_count; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index a1df6e84691f..27811190cbd7 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -89,7 +89,6 @@ struct thread_info { #define TIF_FORK 18 /* ret_from_fork */ #define TIF_NOHZ 19 /* in adaptive nohz mode */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ -#define TIF_DEBUG 21 /* uses debug registers */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ @@ -113,7 +112,6 @@ struct thread_info { #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_FORK (1 << TIF_FORK) #define _TIF_NOHZ (1 << TIF_NOHZ) -#define _TIF_DEBUG (1 << TIF_DEBUG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) @@ -154,7 +152,7 @@ struct thread_info { (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) -#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) +#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) #define PREEMPT_ACTIVE 0x10000000 diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index f3e01a2cbaa1..966502d4682e 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -387,6 +387,7 @@ enum vmcs_field { #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 #define VMX_EPT_EXTENT_CONTEXT 1 #define VMX_EPT_EXTENT_GLOBAL 2 +#define VMX_EPT_EXTENT_SHIFT 24 #define VMX_EPT_EXECUTE_ONLY_BIT (1ull) #define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6) @@ -394,6 +395,7 @@ enum vmcs_field { #define VMX_EPTP_WB_BIT (1ull << 14) #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) +#define VMX_EPT_INVEPT_BIT (1ull << 20) #define VMX_EPT_AD_BIT (1ull << 21) #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index d8d99222b36a..828a1565ba57 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -142,6 +142,8 @@ struct x86_cpuinit_ops { void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); }; +struct timespec; + /** * struct x86_platform_ops - platform specific runtime functions * @calibrate_tsc: calibrate TSC @@ -156,8 +158,8 @@ struct x86_cpuinit_ops { */ struct x86_platform_ops { unsigned long (*calibrate_tsc)(void); - unsigned long (*get_wallclock)(void); - int (*set_wallclock)(unsigned long nowtime); + void (*get_wallclock)(struct timespec *ts); + int (*set_wallclock)(const struct timespec *ts); void (*iommu_shutdown)(void); bool (*is_untracked_pat_range)(u64 start, u64 end); 
void (*nmi_init)(void); diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index d651082c7cf7..0e79420376eb 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -65,6 +65,7 @@ #define EXIT_REASON_EOI_INDUCED 45 #define EXIT_REASON_EPT_VIOLATION 48 #define EXIT_REASON_EPT_MISCONFIG 49 +#define EXIT_REASON_INVEPT 50 #define EXIT_REASON_PREEMPTION_TIMER 52 #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 @@ -106,12 +107,13 @@ { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ + { EXIT_REASON_INVEPT, "INVEPT" }, \ + { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \ { EXIT_REASON_WBINVD, "WBINVD" }, \ { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ { EXIT_REASON_INVD, "INVD" }, \ - { EXIT_REASON_INVPCID, "INVPCID" }, \ - { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" } + { EXIT_REASON_INVPCID, "INVPCID" } #endif /* _UAPIVMX_H */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 230c8ea878e5..d81a972dd506 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -44,6 +44,7 @@ #include <asm/mpspec.h> #include <asm/smp.h> +#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */ static int __initdata acpi_force = 0; u32 acpi_rsdt_forced; int acpi_disabled; @@ -559,6 +560,12 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, int (*__acpi_register_gsi)(struct device *dev, u32 gsi, int trigger, int polarity) = acpi_register_gsi_pic; +#ifdef CONFIG_ACPI_SLEEP +int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel; +#else +int (*acpi_suspend_lowlevel)(void); +#endif + /* * success: return IRQ number (>=0) * failure: return < 0 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index b44577bc9744..2a34aaf3c8f1 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -26,12 +26,12 @@ static char temp_stack[4096]; #endif /** - * acpi_suspend_lowlevel - save kernel state + * x86_acpi_suspend_lowlevel - save kernel state * * Create an identity mapped page table and copy the wakeup routine to * low memory. 
*/ -int acpi_suspend_lowlevel(void) +int x86_acpi_suspend_lowlevel(void) { struct wakeup_header *header = (struct wakeup_header *) __va(real_mode_header->wakeup_header); diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h index 67f59f8c6956..c9c2c982d5e4 100644 --- a/arch/x86/kernel/acpi/sleep.h +++ b/arch/x86/kernel/acpi/sleep.h @@ -15,3 +15,5 @@ extern unsigned long acpi_copy_wakeup_routine(unsigned long); extern void wakeup_long64(void); extern void do_suspend_lowlevel(void); + +extern int x86_acpi_suspend_lowlevel(void); diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 39cc7f7acab3..63092afb142e 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -25,6 +25,7 @@ #include <linux/kdebug.h> #include <linux/delay.h> #include <linux/crash_dump.h> +#include <linux/reboot.h> #include <asm/uv/uv_mmrs.h> #include <asm/uv/uv_hub.h> @@ -36,7 +37,6 @@ #include <asm/ipi.h> #include <asm/smp.h> #include <asm/x86_init.h> -#include <asm/emergency-restart.h> #include <asm/nmi.h> /* BMC sets a bit this MMR non-zero before sending an NMI */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 5013a48d1aff..c587a8757227 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -90,7 +90,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) { u32 l, h; - int mbytes = num_physpages >> (20-PAGE_SHIFT); + int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); if (c->x86_model < 6) { /* Based on AMD doc 20734R - June 2000 */ diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 98f2083832eb..41e8e00a6637 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -55,12 +55,24 @@ struct thermal_state { struct _thermal_state package_power_limit; struct _thermal_state core_thresh0; struct _thermal_state core_thresh1; + struct _thermal_state pkg_thresh0; + struct _thermal_state pkg_thresh1; }; /* Callback to handle core threshold interrupts */ int (*platform_thermal_notify)(__u64 msr_val); EXPORT_SYMBOL(platform_thermal_notify); +/* Callback to handle core package threshold_interrupts */ +int (*platform_thermal_package_notify)(__u64 msr_val); +EXPORT_SYMBOL_GPL(platform_thermal_package_notify); + +/* Callback support of rate control, return true, if + * callback has rate control */ +bool (*platform_thermal_package_rate_control)(void); +EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control); + + static DEFINE_PER_CPU(struct thermal_state, thermal_state); static atomic_t therm_throt_en = ATOMIC_INIT(0); @@ -195,19 +207,25 @@ static int therm_throt_process(bool new_event, int event, int level) return 0; } -static int thresh_event_valid(int event) +static int thresh_event_valid(int level, int event) { struct _thermal_state *state; unsigned int this_cpu = smp_processor_id(); struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); u64 now = get_jiffies_64(); - state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1; + if (level == PACKAGE_LEVEL) + state = (event == 0) ? &pstate->pkg_thresh0 : + &pstate->pkg_thresh1; + else + state = (event == 0) ? 
&pstate->core_thresh0 : + &pstate->core_thresh1; if (time_before64(now, state->next_check)) return 0; state->next_check = now + CHECK_INTERVAL; + return 1; } @@ -322,6 +340,39 @@ device_initcall(thermal_throttle_init_device); #endif /* CONFIG_SYSFS */ +static void notify_package_thresholds(__u64 msr_val) +{ + bool notify_thres_0 = false; + bool notify_thres_1 = false; + + if (!platform_thermal_package_notify) + return; + + /* lower threshold check */ + if (msr_val & THERM_LOG_THRESHOLD0) + notify_thres_0 = true; + /* higher threshold check */ + if (msr_val & THERM_LOG_THRESHOLD1) + notify_thres_1 = true; + + if (!notify_thres_0 && !notify_thres_1) + return; + + if (platform_thermal_package_rate_control && + platform_thermal_package_rate_control()) { + /* Rate control is implemented in callback */ + platform_thermal_package_notify(msr_val); + return; + } + + /* lower threshold reached */ + if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0)) + platform_thermal_package_notify(msr_val); + /* higher threshold reached */ + if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1)) + platform_thermal_package_notify(msr_val); +} + static void notify_thresholds(__u64 msr_val) { /* check whether the interrupt handler is defined; @@ -331,10 +382,12 @@ static void notify_thresholds(__u64 msr_val) return; /* lower threshold reached */ - if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0)) + if ((msr_val & THERM_LOG_THRESHOLD0) && + thresh_event_valid(CORE_LEVEL, 0)) platform_thermal_notify(msr_val); /* higher threshold reached */ - if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1)) + if ((msr_val & THERM_LOG_THRESHOLD1) && + thresh_event_valid(CORE_LEVEL, 1)) platform_thermal_notify(msr_val); } @@ -360,6 +413,8 @@ static void intel_thermal_interrupt(void) if (this_cpu_has(X86_FEATURE_PTS)) { rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); + /* check violations of package thermal thresholds */ + notify_package_thresholds(msr_val); therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, PACKAGE_LEVEL); diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index ca22b73aaa25..f961de9964c7 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -51,9 +51,13 @@ #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/msr.h> +#include <asm/pat.h> #include "mtrr.h" +/* arch_phys_wc_add returns an MTRR register index plus this offset. */ +#define MTRR_TO_PHYS_WC_OFFSET 1000 + u32 num_var_ranges; unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; @@ -525,6 +529,73 @@ int mtrr_del(int reg, unsigned long base, unsigned long size) } EXPORT_SYMBOL(mtrr_del); +/** + * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable + * @base: Physical base address + * @size: Size of region + * + * If PAT is available, this does nothing. If PAT is unavailable, it + * attempts to add a WC MTRR covering size bytes starting at base and + * logs an error if this fails. + * + * Drivers must store the return value to pass to mtrr_del_wc_if_needed, + * but drivers should not try to interpret that return value. + */ +int arch_phys_wc_add(unsigned long base, unsigned long size) +{ + int ret; + + if (pat_enabled) + return 0; /* Success! (We don't need to do anything.) 
*/ + + ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true); + if (ret < 0) { + pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.", + (void *)base, (void *)(base + size - 1)); + return ret; + } + return ret + MTRR_TO_PHYS_WC_OFFSET; +} +EXPORT_SYMBOL(arch_phys_wc_add); + +/* + * arch_phys_wc_del - undoes arch_phys_wc_add + * @handle: Return value from arch_phys_wc_add + * + * This cleans up after mtrr_add_wc_if_needed. + * + * The API guarantees that mtrr_del_wc_if_needed(error code) and + * mtrr_del_wc_if_needed(0) do nothing. + */ +void arch_phys_wc_del(int handle) +{ + if (handle >= 1) { + WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET); + mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0); + } +} +EXPORT_SYMBOL(arch_phys_wc_del); + +/* + * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value + * @handle: Return value from arch_phys_wc_add + * + * This will turn the return value from arch_phys_wc_add into an mtrr + * index suitable for debugging. + * + * Note: There is no legitimate use for this function, except possibly + * in printk line. Alas there is an illegitimate use in some ancient + * drm ioctls. + */ +int phys_wc_to_mtrr_index(int handle) +{ + if (handle < MTRR_TO_PHYS_WC_OFFSET) + return -1; + else + return handle - MTRR_TO_PHYS_WC_OFFSET; +} +EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index); + /* * HACK ALERT! * These should be called implicitly, but we can't yet until all the initcall diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c index 0db655ef3918..639d1289b1ba 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c @@ -491,10 +491,8 @@ static struct perf_amd_iommu __perf_iommu = { static __init int amd_iommu_pc_init(void) { /* Make sure the IOMMU PC resource is available */ - if (!amd_iommu_pc_supported()) { - pr_err("perf: amd_iommu PMU not installed. No support!\n"); + if (!amd_iommu_pc_supported()) return -ENODEV; - } _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c index 7b3fe56b1c21..31f0f335ed22 100644 --- a/arch/x86/kernel/cpu/powerflags.c +++ b/arch/x86/kernel/cpu/powerflags.c @@ -11,10 +11,10 @@ const char *const x86_power_flags[32] = { "fid", /* frequency id control */ "vid", /* voltage id control */ "ttp", /* thermal trip */ - "tm", - "stc", - "100mhzsteps", - "hwpstate", + "tm", /* hardware thermal control */ + "stc", /* software thermal control */ + "100mhzsteps", /* 100 MHz multiplier control */ + "hwpstate", /* hardware P-state control */ "", /* tsc invariant mapped to constant_tsc */ "cpb", /* core performance boost */ "eff_freq_ro", /* Readonly aperf/mperf */ diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index b1581527a236..4934890e4db2 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -364,9 +364,7 @@ static void dt_add_ioapic_domain(unsigned int ioapic_num, * and assigned so we can keep the 1:1 mapping which the ioapic * is having. 
*/ - ret = irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY); - if (ret) - pr_err("Error mapping legacy IRQs: %d\n", ret); + irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY); if (num > NR_IRQS_LEGACY) { ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY, diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 02f07634d265..f66ff162dce8 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -393,6 +393,9 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) unregister_hw_breakpoint(t->ptrace_bps[i]); t->ptrace_bps[i] = NULL; } + + t->debugreg6 = 0; + t->ptrace_dr7 = 0; } void hw_breakpoint_restore(void) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 3dd37ebd591b..1f354f4b602b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -48,10 +48,9 @@ static struct pvclock_wall_clock wall_clock; * have elapsed since the hypervisor wrote the data. So we try to account for * that with system time */ -static unsigned long kvm_get_wallclock(void) +static void kvm_get_wallclock(struct timespec *now) { struct pvclock_vcpu_time_info *vcpu_time; - struct timespec ts; int low, high; int cpu; @@ -64,14 +63,12 @@ static unsigned long kvm_get_wallclock(void) cpu = smp_processor_id(); vcpu_time = &hv_clock[cpu].pvti; - pvclock_read_wallclock(&wall_clock, vcpu_time, &ts); + pvclock_read_wallclock(&wall_clock, vcpu_time, now); preempt_enable(); - - return ts.tv_sec; } -static int kvm_set_wallclock(unsigned long now) +static int kvm_set_wallclock(const struct timespec *now) { return -1; } diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 0920212e6159..ba77ebc2c353 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -111,7 +111,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 */ list_for_each_entry_rcu(a, &desc->head, list) { u64 before, delta, whole_msecs; - int decimal_msecs, thishandled; + int remainder_ns, decimal_msecs, thishandled; before = local_clock(); thishandled = a->handler(type, regs); @@ -123,8 +123,9 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 continue; nmi_longest_ns = delta; - whole_msecs = do_div(delta, (1000 * 1000)); - decimal_msecs = do_div(delta, 1000) % 1000; + whole_msecs = delta; + remainder_ns = do_div(whole_msecs, (1000 * 1000)); + decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: " "%lld.%03d msecs\n", a->handler, whole_msecs, diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 29a8120e6fe8..7461f50d5bb1 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -601,30 +601,48 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) return dr7; } -static int -ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, - struct task_struct *tsk, int disabled) +static int ptrace_fill_bp_fields(struct perf_event_attr *attr, + int len, int type, bool disabled) +{ + int err, bp_len, bp_type; + + err = arch_bp_generic_fields(len, type, &bp_len, &bp_type); + if (!err) { + attr->bp_len = bp_len; + attr->bp_type = bp_type; + attr->disabled = disabled; + } + + return err; +} + +static struct perf_event * +ptrace_register_breakpoint(struct task_struct *tsk, int len, int type, + unsigned long addr, bool disabled) { - int err; - int gen_len, gen_type; struct perf_event_attr attr; + int err; - /* - * We should have at least an inactive breakpoint at this - * slot. 
It means the user is writing dr7 without having - * written the address register first - */ - if (!bp) - return -EINVAL; + ptrace_breakpoint_init(&attr); + attr.bp_addr = addr; - err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); + err = ptrace_fill_bp_fields(&attr, len, type, disabled); if (err) - return err; + return ERR_PTR(err); + + return register_user_hw_breakpoint(&attr, ptrace_triggered, + NULL, tsk); +} - attr = bp->attr; - attr.bp_len = gen_len; - attr.bp_type = gen_type; - attr.disabled = disabled; +static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, + int disabled) +{ + struct perf_event_attr attr = bp->attr; + int err; + + err = ptrace_fill_bp_fields(&attr, len, type, disabled); + if (err) + return err; return modify_user_hw_breakpoint(bp, &attr); } @@ -634,67 +652,50 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, */ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) { - struct thread_struct *thread = &(tsk->thread); + struct thread_struct *thread = &tsk->thread; unsigned long old_dr7; - int i, orig_ret = 0, rc = 0; - int enabled, second_pass = 0; - unsigned len, type; - struct perf_event *bp; - - if (ptrace_get_breakpoints(tsk) < 0) - return -ESRCH; + bool second_pass = false; + int i, rc, ret = 0; data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); + restore: - /* - * Loop through all the hardware breakpoints, making the - * appropriate changes to each. - */ + rc = 0; for (i = 0; i < HBP_NUM; i++) { - enabled = decode_dr7(data, i, &len, &type); - bp = thread->ptrace_bps[i]; - - if (!enabled) { - if (bp) { - /* - * Don't unregister the breakpoints right-away, - * unless all register_user_hw_breakpoint() - * requests have succeeded. This prevents - * any window of opportunity for debug - * register grabbing by other users. - */ - if (!second_pass) - continue; - - rc = ptrace_modify_breakpoint(bp, len, type, - tsk, 1); - if (rc) - break; + unsigned len, type; + bool disabled = !decode_dr7(data, i, &len, &type); + struct perf_event *bp = thread->ptrace_bps[i]; + + if (!bp) { + if (disabled) + continue; + + bp = ptrace_register_breakpoint(tsk, + len, type, 0, disabled); + if (IS_ERR(bp)) { + rc = PTR_ERR(bp); + break; } + + thread->ptrace_bps[i] = bp; continue; } - rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); + rc = ptrace_modify_breakpoint(bp, len, type, disabled); if (rc) break; } - /* - * Make a second pass to free the remaining unused breakpoints - * or to restore the original breakpoints if an error occurred. - */ - if (!second_pass) { - second_pass = 1; - if (rc < 0) { - orig_ret = rc; - data = old_dr7; - } + + /* Restore if the first pass failed, second_pass shouldn't fail. */ + if (rc && !WARN_ON(second_pass)) { + ret = rc; + data = old_dr7; + second_pass = true; goto restore; } - ptrace_put_breakpoints(tsk); - - return ((orig_ret < 0) ? 
orig_ret : rc); + return ret; } /* @@ -702,25 +703,17 @@ restore: */ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) { - struct thread_struct *thread = &(tsk->thread); + struct thread_struct *thread = &tsk->thread; unsigned long val = 0; if (n < HBP_NUM) { - struct perf_event *bp; + struct perf_event *bp = thread->ptrace_bps[n]; - if (ptrace_get_breakpoints(tsk) < 0) - return -ESRCH; - - bp = thread->ptrace_bps[n]; - if (!bp) - val = 0; - else + if (bp) val = bp->hw.info.address; - - ptrace_put_breakpoints(tsk); } else if (n == 6) { val = thread->debugreg6; - } else if (n == 7) { + } else if (n == 7) { val = thread->ptrace_dr7; } return val; @@ -729,29 +722,14 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, unsigned long addr) { - struct perf_event *bp; struct thread_struct *t = &tsk->thread; - struct perf_event_attr attr; + struct perf_event *bp = t->ptrace_bps[nr]; int err = 0; - if (ptrace_get_breakpoints(tsk) < 0) - return -ESRCH; - - if (!t->ptrace_bps[nr]) { - ptrace_breakpoint_init(&attr); - /* - * Put stub len and type to register (reserve) an inactive but - * correct bp - */ - attr.bp_addr = addr; - attr.bp_len = HW_BREAKPOINT_LEN_1; - attr.bp_type = HW_BREAKPOINT_W; - attr.disabled = 1; - - bp = register_user_hw_breakpoint(&attr, ptrace_triggered, - NULL, tsk); - + if (!bp) { /* + * Put stub len and type to create an inactive but correct bp. + * * CHECKME: the previous code returned -EIO if the addr wasn't * a valid task virtual addr. The new one will return -EINVAL in * this case. @@ -760,22 +738,20 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, * writing for the user. And anyway this is the previous * behaviour. 
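For context on what these paths service: a tracer programs the hardware slots from user space by poking u_debugreg[0..3] (addresses) and u_debugreg[7] (control) with PTRACE_POKEUSER, and the refactor above keeps that working when DR7 is written for a slot whose address register was never set, since an inactive breakpoint is now registered on demand. A hedged user-space sketch, with error handling omitted and the DR7 encoding taken from the SDM; the function name is invented.

	#include <stddef.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/user.h>

	/* arm a 1-byte write watchpoint in slot 0 of a stopped tracee */
	static void arm_write_watchpoint(pid_t pid, unsigned long addr)
	{
		/* L0 (bit 0) enables slot 0; R/W0 = 01b in bits 16-17 means
		 * break on data writes; LEN0 = 00b in bits 18-19 means 1 byte */
		unsigned long dr7 = 0x1UL | (0x1UL << 16);

		ptrace(PTRACE_POKEUSER, pid,
		       (void *)offsetof(struct user, u_debugreg[0]), (void *)addr);
		ptrace(PTRACE_POKEUSER, pid,
		       (void *)offsetof(struct user, u_debugreg[7]), (void *)dr7);
	}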
*/ - if (IS_ERR(bp)) { + bp = ptrace_register_breakpoint(tsk, + X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE, + addr, true); + if (IS_ERR(bp)) err = PTR_ERR(bp); - goto put; - } - - t->ptrace_bps[nr] = bp; + else + t->ptrace_bps[nr] = bp; } else { - bp = t->ptrace_bps[nr]; + struct perf_event_attr attr = bp->attr; - attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); } -put: - ptrace_put_breakpoints(tsk); return err; } @@ -785,30 +761,20 @@ put: static int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) { - struct thread_struct *thread = &(tsk->thread); - int rc = 0; - + struct thread_struct *thread = &tsk->thread; /* There are no DR4 or DR5 registers */ - if (n == 4 || n == 5) - return -EIO; + int rc = -EIO; - if (n == 6) { - thread->debugreg6 = val; - goto ret_path; - } if (n < HBP_NUM) { rc = ptrace_set_breakpoint_addr(tsk, n, val); - if (rc) - return rc; - } - /* All that's left is DR7 */ - if (n == 7) { + } else if (n == 6) { + thread->debugreg6 = val; + rc = 0; + } else if (n == 7) { rc = ptrace_write_dr7(tsk, val); if (!rc) thread->ptrace_dr7 = val; } - -ret_path: return rc; } diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 2cb9470ea85b..a16bae3f83b3 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -128,46 +128,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); } -static struct pvclock_vsyscall_time_info *pvclock_vdso_info; - -static struct pvclock_vsyscall_time_info * -pvclock_get_vsyscall_user_time_info(int cpu) -{ - if (!pvclock_vdso_info) { - BUG(); - return NULL; - } - - return &pvclock_vdso_info[cpu]; -} - -struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) -{ - return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; -} - #ifdef CONFIG_X86_64 -static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, - void *v) -{ - struct task_migration_notifier *mn = v; - struct pvclock_vsyscall_time_info *pvti; - - pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); - - /* this is NULL when pvclock vsyscall is not initialized */ - if (unlikely(pvti == NULL)) - return NOTIFY_DONE; - - pvti->migrate_count++; - - return NOTIFY_DONE; -} - -static struct notifier_block pvclock_migrate = { - .notifier_call = pvclock_task_migrate, -}; - /* * Initialize the generic pvclock vsyscall state. This will allocate * a/some page(s) for the per-vcpu pvclock information, set up a @@ -181,17 +142,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); - pvclock_vdso_info = i; - for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, __pa(i) + (idx*PAGE_SIZE), PAGE_KERNEL_VVAR); } - - register_task_migration_notifier(&pvclock_migrate); - return 0; } #endif diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 76fa1e9a2b39..563ed91e6faa 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -36,22 +36,6 @@ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); static const struct desc_ptr no_idt = {}; -static int reboot_mode; -enum reboot_type reboot_type = BOOT_ACPI; -int reboot_force; - -/* - * This variable is used privately to keep track of whether or not - * reboot_type is still set to its default value (i.e., reboot= hasn't - * been set on the command line). 
This is needed so that we can - * suppress DMI scanning for reboot quirks. Without it, it's - * impossible to override a faulty reboot quirk without recompiling. - */ -static int reboot_default = 1; - -#ifdef CONFIG_SMP -static int reboot_cpu = -1; -#endif /* * This is set if we need to go through the 'emergency' path. @@ -64,79 +48,6 @@ static int reboot_emergency; bool port_cf9_safe = false; /* - * reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci] - * warm Don't set the cold reboot flag - * cold Set the cold reboot flag - * bios Reboot by jumping through the BIOS - * smp Reboot by executing reset on BSP or other CPU - * triple Force a triple fault (init) - * kbd Use the keyboard controller. cold reset (default) - * acpi Use the RESET_REG in the FADT - * efi Use efi reset_system runtime service - * pci Use the so-called "PCI reset register", CF9 - * force Avoid anything that could hang. - */ -static int __init reboot_setup(char *str) -{ - for (;;) { - /* - * Having anything passed on the command line via - * reboot= will cause us to disable DMI checking - * below. - */ - reboot_default = 0; - - switch (*str) { - case 'w': - reboot_mode = 0x1234; - break; - - case 'c': - reboot_mode = 0; - break; - -#ifdef CONFIG_SMP - case 's': - if (isdigit(*(str+1))) { - reboot_cpu = (int) (*(str+1) - '0'); - if (isdigit(*(str+2))) - reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0'); - } - /* - * We will leave sorting out the final value - * when we are ready to reboot, since we might not - * have detected BSP APIC ID or smp_num_cpu - */ - break; -#endif /* CONFIG_SMP */ - - case 'b': - case 'a': - case 'k': - case 't': - case 'e': - case 'p': - reboot_type = *str; - break; - - case 'f': - reboot_force = 1; - break; - } - - str = strchr(str, ','); - if (str) - str++; - else - break; - } - return 1; -} - -__setup("reboot=", reboot_setup); - - -/* * Reboot options and system auto-detection code provided by * Dell Inc. so their systems "just work". :-) */ @@ -536,6 +447,7 @@ static void native_machine_emergency_restart(void) int i; int attempt = 0; int orig_reboot_type = reboot_type; + unsigned short mode; if (reboot_emergency) emergency_vmx_disable_all(); @@ -543,7 +455,8 @@ static void native_machine_emergency_restart(void) tboot_shutdown(TB_SHUTDOWN_REBOOT); /* Tell the BIOS if we want cold or warm reboot */ - *((unsigned short *)__va(0x472)) = reboot_mode; + mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0; + *((unsigned short *)__va(0x472)) = mode; for (;;) { /* Could also try the reset bit in the Hammer NB */ @@ -585,7 +498,7 @@ static void native_machine_emergency_restart(void) case BOOT_EFI: if (efi_enabled(EFI_RUNTIME_SERVICES)) - efi.reset_system(reboot_mode ? + efi.reset_system(reboot_mode == REBOOT_WARM ? EFI_RESET_WARM : EFI_RESET_COLD, EFI_SUCCESS, 0, NULL); @@ -614,26 +527,10 @@ void native_machine_shutdown(void) { /* Stop the cpus and apics */ #ifdef CONFIG_SMP - - /* The boot cpu is always logical cpu 0 */ - int reboot_cpu_id = 0; - - /* See if there has been given a command line override */ - if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && - cpu_online(reboot_cpu)) - reboot_cpu_id = reboot_cpu; - - /* Make certain the cpu I'm about to reboot on is online */ - if (!cpu_online(reboot_cpu_id)) - reboot_cpu_id = smp_processor_id(); - - /* Make certain I only run on the appropriate processor */ - set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); - /* - * O.K Now that I'm on the appropriate processor, stop all of the - * others. 
Also disable the local irq to not receive the per-cpu - * timer interrupt which may trigger scheduler's load balance. + * Stop all of the others. Also disable the local irq to + * not receive the per-cpu timer interrupt which may trigger + * scheduler's load balance. */ local_irq_disable(); stop_other_cpus(); diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 198eb201ed3b..0aa29394ed6f 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -38,8 +38,9 @@ EXPORT_SYMBOL(rtc_lock); * jump to the next second precisely 500 ms later. Check the Motorola * MC146818A or Dallas DS12887 data sheet for details. */ -int mach_set_rtc_mmss(unsigned long nowtime) +int mach_set_rtc_mmss(const struct timespec *now) { + unsigned long nowtime = now->tv_sec; struct rtc_time tm; int retval = 0; @@ -58,7 +59,7 @@ int mach_set_rtc_mmss(unsigned long nowtime) return retval; } -unsigned long mach_get_cmos_time(void) +void mach_get_cmos_time(struct timespec *now) { unsigned int status, year, mon, day, hour, min, sec, century = 0; unsigned long flags; @@ -107,7 +108,8 @@ unsigned long mach_get_cmos_time(void) } else year += CMOS_YEARS_OFFS; - return mktime(year, mon, day, hour, min, sec); + now->tv_sec = mktime(year, mon, day, hour, min, sec); + now->tv_nsec = 0; } /* Routines for accessing the CMOS RAM/RTC. */ @@ -135,18 +137,13 @@ EXPORT_SYMBOL(rtc_cmos_write); int update_persistent_clock(struct timespec now) { - return x86_platform.set_wallclock(now.tv_sec); + return x86_platform.set_wallclock(&now); } /* not static: needed by APM */ void read_persistent_clock(struct timespec *ts) { - unsigned long retval; - - retval = x86_platform.get_wallclock(); - - ts->tv_sec = retval; - ts->tv_nsec = 0; + x86_platform.get_wallclock(ts); } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 56f7fcfe7fa2..e68709da8251 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1040,8 +1040,6 @@ void __init setup_arch(char **cmdline_p) /* max_low_pfn get updated here */ find_low_pfn_range(); #else - num_physpages = max_pfn; - check_x2apic(); /* How many end-of-memory variables you have, grandma! */ diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index f4fe0b8879e0..cdaa347dfcad 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -265,23 +265,30 @@ void smp_reschedule_interrupt(struct pt_regs *regs) */ } -void smp_trace_reschedule_interrupt(struct pt_regs *regs) +static inline void smp_entering_irq(void) { ack_APIC_irq(); + irq_enter(); +} + +void smp_trace_reschedule_interrupt(struct pt_regs *regs) +{ + /* + * Need to call irq_enter() before calling the trace point. + * __smp_reschedule_interrupt() calls irq_enter/exit() too (in + * scheduler_ipi(). This is OK, since those functions are allowed + * to nest. 
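The comment above relies on irq_enter()/irq_exit() keeping a hardirq nesting count in preempt_count, so wrapping a handler that itself enters and leaves IRQ context is harmless. A rough shape of the nesting, with the depths noted in comments; the function name is illustrative only.

	#include <linux/hardirq.h>
	#include <asm/apic.h>

	static void trace_ipi_handler_shape(void)
	{
		ack_APIC_irq();
		irq_enter();		/* hardirq depth 0 -> 1 */

		/* the wrapped handler may nest, e.g. scheduler_ipi(): */
		irq_enter();		/* 1 -> 2 */
		irq_exit();		/* 2 -> 1, softirqs stay deferred */

		irq_exit();		/* 1 -> 0, pending softirqs may run */
	}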
+ */ + smp_entering_irq(); trace_reschedule_entry(RESCHEDULE_VECTOR); __smp_reschedule_interrupt(); trace_reschedule_exit(RESCHEDULE_VECTOR); + exiting_irq(); /* * KVM uses this interrupt to force a cpu out of guest mode */ } -static inline void call_function_entering_irq(void) -{ - ack_APIC_irq(); - irq_enter(); -} - static inline void __smp_call_function_interrupt(void) { generic_smp_call_function_interrupt(); @@ -290,14 +297,14 @@ static inline void __smp_call_function_interrupt(void) void smp_call_function_interrupt(struct pt_regs *regs) { - call_function_entering_irq(); + smp_entering_irq(); __smp_call_function_interrupt(); exiting_irq(); } void smp_trace_call_function_interrupt(struct pt_regs *regs) { - call_function_entering_irq(); + smp_entering_irq(); trace_call_function_entry(CALL_FUNCTION_VECTOR); __smp_call_function_interrupt(); trace_call_function_exit(CALL_FUNCTION_VECTOR); @@ -312,14 +319,14 @@ static inline void __smp_call_function_single_interrupt(void) void smp_call_function_single_interrupt(struct pt_regs *regs) { - call_function_entering_irq(); + smp_entering_irq(); __smp_call_function_single_interrupt(); exiting_irq(); } void smp_trace_call_function_single_interrupt(struct pt_regs *regs) { - call_function_entering_irq(); + smp_entering_irq(); trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); __smp_call_function_single_interrupt(); trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index afc11245827c..c98f05442325 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -79,16 +79,6 @@ static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val) *((u32 *) (apic->regs + reg_off)) = val; } -static inline int apic_test_and_set_vector(int vec, void *bitmap) -{ - return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); -} - -static inline int apic_test_and_clear_vector(int vec, void *bitmap) -{ - return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); -} - static inline int apic_test_vector(int vec, void *bitmap) { return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); @@ -331,10 +321,10 @@ void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) } EXPORT_SYMBOL_GPL(kvm_apic_update_irr); -static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic) +static inline void apic_set_irr(int vec, struct kvm_lapic *apic) { apic->irr_pending = true; - return apic_test_and_set_vector(vec, apic->regs + APIC_IRR); + apic_set_vector(vec, apic->regs + APIC_IRR); } static inline int apic_search_irr(struct kvm_lapic *apic) @@ -681,28 +671,21 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, if (unlikely(!apic_enabled(apic))) break; + result = 1; + if (dest_map) __set_bit(vcpu->vcpu_id, dest_map); - if (kvm_x86_ops->deliver_posted_interrupt) { - result = 1; + if (kvm_x86_ops->deliver_posted_interrupt) kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); - } else { - result = !apic_test_and_set_irr(vector, apic); - - if (!result) { - if (trig_mode) - apic_debug("level trig mode repeatedly " - "for vector %d", vector); - goto out; - } + else { + apic_set_irr(vector, apic); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); } -out: trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, - trig_mode, vector, !result); + trig_mode, vector, false); break; case APIC_DM_REMRD: diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0d094da49541..9651c9937588 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -132,8 
+132,8 @@ module_param(dbg, bool, 0644); (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ * PT32_LEVEL_BITS))) - 1)) -#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ - | PT64_NX_MASK) +#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ + | shadow_x_mask | shadow_nx_mask) #define ACC_EXEC_MASK 1 #define ACC_WRITE_MASK PT_WRITABLE_MASK @@ -331,11 +331,6 @@ static int is_large_pte(u64 pte) return pte & PT_PAGE_SIZE_MASK; } -static int is_dirty_gpte(unsigned long pte) -{ - return pte & PT_DIRTY_MASK; -} - static int is_rmap_spte(u64 pte) { return is_shadow_present_pte(pte); @@ -2052,12 +2047,18 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) return __shadow_walk_next(iterator, *iterator->sptep); } -static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) +static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed) { u64 spte; + BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK || + VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); + spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | - shadow_user_mask | shadow_x_mask | shadow_accessed_mask; + shadow_user_mask | shadow_x_mask; + + if (accessed) + spte |= shadow_accessed_mask; mmu_spte_set(sptep, spte); } @@ -2574,14 +2575,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) mmu_free_roots(vcpu); } -static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) -{ - int bit7; - - bit7 = (gpte >> 7) & 1; - return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; -} - static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) { @@ -2594,26 +2587,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, return gfn_to_pfn_memslot_atomic(slot, gfn); } -static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - u64 gpte) -{ - if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) - goto no_present; - - if (!is_present_gpte(gpte)) - goto no_present; - - if (!(gpte & PT_ACCESSED_MASK)) - goto no_present; - - return false; - -no_present: - drop_spte(vcpu->kvm, spte); - return true; -} - static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *start, u64 *end) @@ -2710,7 +2683,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, iterator.level - 1, 1, ACC_ALL, iterator.sptep); - link_shadow_page(iterator.sptep, sp); + link_shadow_page(iterator.sptep, sp, true); } } return emulate; @@ -2811,6 +2784,13 @@ exit: static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code) { /* + * Do not fix the mmio spte with invalid generation number which + * need to be updated by slow page fault path. + */ + if (unlikely(error_code & PFERR_RSVD_MASK)) + return false; + + /* * #PF can be fast only if the shadow page table is present and it * is caused by write-protect, that means we just need change the * W bit of the spte which can be done out of mmu-lock. 
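The new PFERR_RSVD_MASK test above keeps MMIO faults out of the lockless fast path, since their sptes carry deliberately-set reserved bits and must be refreshed on the slow path. For reference, the error-code bits being tested follow the architectural page-fault layout; the helper below is only a sketch of the check, not the kernel's actual predicate.

	#include <linux/types.h>

	#define PF_PRESENT	(1u << 0)	/* page-level protection violation */
	#define PF_WRITE	(1u << 1)	/* write access */
	#define PF_USER		(1u << 2)	/* user-mode access */
	#define PF_RSVD		(1u << 3)	/* reserved bit set in a paging entry */
	#define PF_FETCH	(1u << 4)	/* instruction fetch */

	static bool fast_path_candidate(unsigned int error_code)
	{
		if (error_code & PF_RSVD)	/* MMIO spte, take the slow path */
			return false;
		/* only write-protect faults can be fixed without mmu_lock */
		return error_code & PF_WRITE;
	}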
@@ -3202,6 +3182,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) mmu_sync_roots(vcpu); spin_unlock(&vcpu->kvm->mmu_lock); } +EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, struct x86_exception *exception) @@ -3471,6 +3452,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) ++vcpu->stat.tlb_flush; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } +EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb); static void paging_new_cr3(struct kvm_vcpu *vcpu) { @@ -3494,18 +3476,6 @@ static void paging_free(struct kvm_vcpu *vcpu) nonpaging_free(vcpu); } -static inline void protect_clean_gpte(unsigned *access, unsigned gpte) -{ - unsigned mask; - - BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); - - mask = (unsigned)~ACC_WRITE_MASK; - /* Allow write access to dirty gptes */ - mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK; - *access &= mask; -} - static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, unsigned access, int *nr_present) { @@ -3523,16 +3493,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, return false; } -static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte) -{ - unsigned access; - - access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK; - access &= ~(gpte >> PT64_NX_SHIFT); - - return access; -} - static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) { unsigned index; @@ -3542,6 +3502,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp return mmu->last_pte_bitmap & (1 << index); } +#define PTTYPE_EPT 18 /* arbitrary */ +#define PTTYPE PTTYPE_EPT +#include "paging_tmpl.h" +#undef PTTYPE + #define PTTYPE 64 #include "paging_tmpl.h" #undef PTTYPE @@ -3556,6 +3521,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int maxphyaddr = cpuid_maxphyaddr(vcpu); u64 exb_bit_rsvd = 0; + context->bad_mt_xwr = 0; + if (!context->nx) exb_bit_rsvd = rsvd_bits(63, 63); switch (context->root_level) { @@ -3611,7 +3578,40 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, } } -static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) +static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + int maxphyaddr = cpuid_maxphyaddr(vcpu); + int pte; + + context->rsvd_bits_mask[0][3] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); + context->rsvd_bits_mask[0][2] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); + context->rsvd_bits_mask[0][1] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); + context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + + /* large page */ + context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; + context->rsvd_bits_mask[1][2] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); + context->rsvd_bits_mask[1][1] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); + context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + + for (pte = 0; pte < 64; pte++) { + int rwx_bits = pte & 7; + int mt = pte >> 3; + if (mt == 0x2 || mt == 0x3 || mt == 0x7 || + rwx_bits == 0x2 || rwx_bits == 0x6 || + (rwx_bits == 0x4 && !execonly)) + context->bad_mt_xwr |= (1ull << pte); + } +} + +static void update_permission_bitmask(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu, bool ept) { unsigned bit, byte, pfec; u8 map; @@ -3629,12 +3629,16 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu w = bit & ACC_WRITE_MASK; u = bit & ACC_USER_MASK; - /* Not really needed: 
!nx will cause pte.nx to fault */ - x |= !mmu->nx; - /* Allow supervisor writes if !cr0.wp */ - w |= !is_write_protection(vcpu) && !uf; - /* Disallow supervisor fetches of user code if cr4.smep */ - x &= !(smep && u && !uf); + if (!ept) { + /* Not really needed: !nx will cause pte.nx to fault */ + x |= !mmu->nx; + /* Allow supervisor writes if !cr0.wp */ + w |= !is_write_protection(vcpu) && !uf; + /* Disallow supervisor fetches of user code if cr4.smep */ + x &= !(smep && u && !uf); + } else + /* Not really needed: no U/S accesses on ept */ + u = 1; fault = (ff && !x) || (uf && !u) || (wf && !w); map |= fault << bit; @@ -3669,7 +3673,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, context->root_level = level; reset_rsvds_bits_mask(vcpu, context); - update_permission_bitmask(vcpu, context); + update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); ASSERT(is_pae(vcpu)); @@ -3699,7 +3703,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu, context->root_level = PT32_ROOT_LEVEL; reset_rsvds_bits_mask(vcpu, context); - update_permission_bitmask(vcpu, context); + update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); context->new_cr3 = paging_new_cr3; @@ -3761,7 +3765,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->gva_to_gpa = paging32_gva_to_gpa; } - update_permission_bitmask(vcpu, context); + update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); return 0; @@ -3793,6 +3797,33 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); +int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, + bool execonly) +{ + ASSERT(vcpu); + ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); + + context->shadow_root_level = kvm_x86_ops->get_tdp_level(); + + context->nx = true; + context->new_cr3 = paging_new_cr3; + context->page_fault = ept_page_fault; + context->gva_to_gpa = ept_gva_to_gpa; + context->sync_page = ept_sync_page; + context->invlpg = ept_invlpg; + context->update_pte = ept_update_pte; + context->free = paging_free; + context->root_level = context->shadow_root_level; + context->root_hpa = INVALID_PAGE; + context->direct_map = false; + + update_permission_bitmask(vcpu, context, true); + reset_rsvds_bits_mask_ept(vcpu, context, execonly); + + return 0; +} +EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); + static int init_kvm_softmmu(struct kvm_vcpu *vcpu) { int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); @@ -3840,7 +3871,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) g_context->gva_to_gpa = paging32_gva_to_gpa_nested; } - update_permission_bitmask(vcpu, g_context); + update_permission_bitmask(vcpu, g_context, false); update_last_pte_bitmap(vcpu, g_context); return 0; @@ -3916,8 +3947,8 @@ static bool need_remote_flush(u64 old, u64 new) return true; if ((old ^ new) & PT64_BASE_ADDR_MASK) return true; - old ^= PT64_NX_MASK; - new ^= PT64_NX_MASK; + old ^= shadow_nx_mask; + new ^= shadow_nx_mask; return (old & ~new & PT64_PERM_MASK) != 0; } @@ -4175,7 +4206,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, switch (er) { case EMULATE_DONE: return 1; - case EMULATE_DO_MMIO: + case EMULATE_USER_EXIT: ++vcpu->stat.mmio_exits; /* fall through */ case EMULATE_FAIL: @@ -4383,11 +4414,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm) /* * The very rare case: if the generation-number is round, * zap all shadow pages. 
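The wraparound check above works because MMIO sptes are stamped with the memslot generation and are only trusted while that stamp matches the current value; once the counter would wrap, stale and fresh stamps could collide, so everything is zapped instead. A generic, hypothetical model of the idea (this is not the KVM data layout):

	#include <linux/types.h>

	struct tagged_entry {		/* hypothetical */
		u64 data;
		unsigned int gen;	/* generation at creation time */
	};

	static bool entry_still_valid(const struct tagged_entry *e,
				      unsigned int current_gen)
	{
		return e->gen == current_gen;
	}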
- * - * The max value is MMIO_MAX_GEN - 1 since it is not called - * when mark memslot invalid. */ - if (unlikely(kvm_current_mmio_generation(kvm) >= (MMIO_MAX_GEN - 1))) { + if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) { printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n"); kvm_mmu_invalidate_zap_all_pages(kvm); } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 5b59c573aba7..77e044a0f5f7 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -71,6 +71,8 @@ enum { int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); +int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, + bool execonly); static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) { diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 7769699d48a8..043330159179 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -23,6 +23,13 @@ * so the code in this file is compiled twice, once per pte size. */ +/* + * This is used to catch non optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro + * uses for EPT without A/D paging type. + */ +extern u64 __pure __using_nonexistent_pte_bit(void) + __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT"); + #if PTTYPE == 64 #define pt_element_t u64 #define guest_walker guest_walker64 @@ -32,6 +39,10 @@ #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) #define PT_INDEX(addr, level) PT64_INDEX(addr, level) #define PT_LEVEL_BITS PT64_LEVEL_BITS + #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK + #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK + #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT + #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT #ifdef CONFIG_X86_64 #define PT_MAX_FULL_LEVELS 4 #define CMPXCHG cmpxchg @@ -49,7 +60,26 @@ #define PT_INDEX(addr, level) PT32_INDEX(addr, level) #define PT_LEVEL_BITS PT32_LEVEL_BITS #define PT_MAX_FULL_LEVELS 2 + #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK + #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK + #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT + #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT #define CMPXCHG cmpxchg +#elif PTTYPE == PTTYPE_EPT + #define pt_element_t u64 + #define guest_walker guest_walkerEPT + #define FNAME(name) ept_##name + #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) + #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) + #define PT_INDEX(addr, level) PT64_INDEX(addr, level) + #define PT_LEVEL_BITS PT64_LEVEL_BITS + #define PT_GUEST_ACCESSED_MASK 0 + #define PT_GUEST_DIRTY_MASK 0 + #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit() + #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit() + #define CMPXCHG cmpxchg64 + #define PT_MAX_FULL_LEVELS 4 #else #error Invalid PTTYPE value #endif @@ -80,6 +110,40 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; } +static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) +{ + unsigned mask; + + /* dirty bit is not supported, so no need to track it */ + if (!PT_GUEST_DIRTY_MASK) + return; + + BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); + + mask = (unsigned)~ACC_WRITE_MASK; + /* Allow write access to dirty gptes */ + mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & + PT_WRITABLE_MASK; + *access &= mask; +} + +static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, 
u64 gpte, int level) +{ + int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f; + + return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | + ((mmu->bad_mt_xwr & (1ull << low6)) != 0); +} + +static inline int FNAME(is_present_gpte)(unsigned long pte) +{ +#if PTTYPE != PTTYPE_EPT + return is_present_gpte(pte); +#else + return pte & 7; +#endif +} + static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, pt_element_t __user *ptep_user, unsigned index, pt_element_t orig_pte, pt_element_t new_pte) @@ -103,6 +167,42 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, return (ret != orig_pte); } +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, u64 *spte, + u64 gpte) +{ + if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + goto no_present; + + if (!FNAME(is_present_gpte)(gpte)) + goto no_present; + + /* if accessed bit is not supported prefetch non accessed gpte */ + if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK)) + goto no_present; + + return false; + +no_present: + drop_spte(vcpu->kvm, spte); + return true; +} + +static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) +{ + unsigned access; +#if PTTYPE == PTTYPE_EPT + access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) | + ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) | + ACC_USER_MASK; +#else + access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK; + access &= ~(gpte >> PT64_NX_SHIFT); +#endif + + return access; +} + static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, @@ -114,18 +214,23 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, gfn_t table_gfn; int ret; + /* dirty/accessed bits are not supported, so no need to update them */ + if (!PT_GUEST_DIRTY_MASK) + return 0; + for (level = walker->max_level; level >= walker->level; --level) { pte = orig_pte = walker->ptes[level - 1]; table_gfn = walker->table_gfn[level - 1]; ptep_user = walker->ptep_user[level - 1]; index = offset_in_page(ptep_user) / sizeof(pt_element_t); - if (!(pte & PT_ACCESSED_MASK)) { + if (!(pte & PT_GUEST_ACCESSED_MASK)) { trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); - pte |= PT_ACCESSED_MASK; + pte |= PT_GUEST_ACCESSED_MASK; } - if (level == walker->level && write_fault && !is_dirty_gpte(pte)) { + if (level == walker->level && write_fault && + !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); - pte |= PT_DIRTY_MASK; + pte |= PT_GUEST_DIRTY_MASK; } if (pte == orig_pte) continue; @@ -170,7 +275,7 @@ retry_walk: if (walker->level == PT32E_ROOT_LEVEL) { pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); trace_kvm_mmu_paging_element(pte, walker->level); - if (!is_present_gpte(pte)) + if (!FNAME(is_present_gpte)(pte)) goto error; --walker->level; } @@ -179,7 +284,7 @@ retry_walk: ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0); - accessed_dirty = PT_ACCESSED_MASK; + accessed_dirty = PT_GUEST_ACCESSED_MASK; pt_access = pte_access = ACC_ALL; ++walker->level; @@ -215,17 +320,17 @@ retry_walk: trace_kvm_mmu_paging_element(pte, walker->level); - if (unlikely(!is_present_gpte(pte))) + if (unlikely(!FNAME(is_present_gpte)(pte))) goto error; - if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte, - walker->level))) { + if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, + walker->level))) { errcode |= PFERR_RSVD_MASK | 
PFERR_PRESENT_MASK; goto error; } accessed_dirty &= pte; - pte_access = pt_access & gpte_access(vcpu, pte); + pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); walker->ptes[walker->level - 1] = pte; } while (!is_last_gpte(mmu, walker->level, pte)); @@ -248,13 +353,15 @@ retry_walk: walker->gfn = real_gpa >> PAGE_SHIFT; if (!write_fault) - protect_clean_gpte(&pte_access, pte); + FNAME(protect_clean_gpte)(&pte_access, pte); else /* - * On a write fault, fold the dirty bit into accessed_dirty by - * shifting it one place right. + * On a write fault, fold the dirty bit into accessed_dirty. + * For modes without A/D bits support accessed_dirty will be + * always clear. */ - accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT); + accessed_dirty &= pte >> + (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); @@ -279,6 +386,25 @@ error: walker->fault.vector = PF_VECTOR; walker->fault.error_code_valid = true; walker->fault.error_code = errcode; + +#if PTTYPE == PTTYPE_EPT + /* + * Use PFERR_RSVD_MASK in error_code to to tell if EPT + * misconfiguration requires to be injected. The detection is + * done by is_rsvd_bits_set() above. + * + * We set up the value of exit_qualification to inject: + * [2:0] - Derive from [2:0] of real exit_qualification at EPT violation + * [5:3] - Calculated by the page walk of the guest EPT page tables + * [7:8] - Derived from [7:8] of real exit_qualification + * + * The other bits are set to 0. + */ + if (!(errcode & PFERR_RSVD_MASK)) { + vcpu->arch.exit_qualification &= 0x187; + vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3; + } +#endif walker->fault.address = addr; walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; @@ -293,6 +419,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker, access); } +#if PTTYPE != PTTYPE_EPT static int FNAME(walk_addr_nested)(struct guest_walker *walker, struct kvm_vcpu *vcpu, gva_t addr, u32 access) @@ -300,6 +427,7 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker, return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, addr, access); } +#endif static bool FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, @@ -309,14 +437,14 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, gfn_t gfn; pfn_t pfn; - if (prefetch_invalid_gpte(vcpu, sp, spte, gpte)) + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) return false; pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); gfn = gpte_to_gfn(gpte); - pte_access = sp->role.access & gpte_access(vcpu, gpte); - protect_clean_gpte(&pte_access, gpte); + pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); + FNAME(protect_clean_gpte)(&pte_access, gpte); pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, no_dirty_log && (pte_access & ACC_WRITE_MASK)); if (is_error_pfn(pfn)) @@ -446,7 +574,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, goto out_gpte_changed; if (sp) - link_shadow_page(it.sptep, sp); + link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK); } for (; @@ -466,7 +594,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, true, direct_access, it.sptep); - link_shadow_page(it.sptep, sp); + link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK); } clear_sp_write_flooding_count(it.sptep); @@ -727,6 +855,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 
access, return gpa; } +#if PTTYPE != PTTYPE_EPT static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, struct x86_exception *exception) @@ -745,6 +874,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, return gpa; } +#endif /* * Using the cached information from sp->gfns is safe because: @@ -785,15 +915,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) sizeof(pt_element_t))) return -EINVAL; - if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) { + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { vcpu->kvm->tlbs_dirty++; continue; } gfn = gpte_to_gfn(gpte); pte_access = sp->role.access; - pte_access &= gpte_access(vcpu, gpte); - protect_clean_gpte(&pte_access, gpte); + pte_access &= FNAME(gpte_access)(vcpu, gpte); + FNAME(protect_clean_gpte)(&pte_access, gpte); if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access, &nr_present)) @@ -830,3 +960,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) #undef gpte_to_gfn #undef gpte_to_gfn_lvl #undef CMPXCHG +#undef PT_GUEST_ACCESSED_MASK +#undef PT_GUEST_DIRTY_MASK +#undef PT_GUEST_DIRTY_SHIFT +#undef PT_GUEST_ACCESSED_SHIFT diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index c53e797e7369..5c4f63151b4d 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -160,7 +160,7 @@ static void stop_counter(struct kvm_pmc *pmc) static void reprogram_counter(struct kvm_pmc *pmc, u32 type, unsigned config, bool exclude_user, bool exclude_kernel, - bool intr) + bool intr, bool in_tx, bool in_tx_cp) { struct perf_event *event; struct perf_event_attr attr = { @@ -173,6 +173,10 @@ static void reprogram_counter(struct kvm_pmc *pmc, u32 type, .exclude_kernel = exclude_kernel, .config = config, }; + if (in_tx) + attr.config |= HSW_IN_TX; + if (in_tx_cp) + attr.config |= HSW_IN_TX_CHECKPOINTED; attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); @@ -226,7 +230,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | - ARCH_PERFMON_EVENTSEL_CMASK))) { + ARCH_PERFMON_EVENTSEL_CMASK | + HSW_IN_TX | + HSW_IN_TX_CHECKPOINTED))) { config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, unit_mask); if (config != PERF_COUNT_HW_MAX) @@ -239,7 +245,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) reprogram_counter(pmc, type, config, !(eventsel & ARCH_PERFMON_EVENTSEL_USR), !(eventsel & ARCH_PERFMON_EVENTSEL_OS), - eventsel & ARCH_PERFMON_EVENTSEL_INT); + eventsel & ARCH_PERFMON_EVENTSEL_INT, + (eventsel & HSW_IN_TX), + (eventsel & HSW_IN_TX_CHECKPOINTED)); } static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx) @@ -256,7 +264,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx) arch_events[fixed_pmc_events[idx]].event_type, !(en & 0x2), /* exclude user */ !(en & 0x1), /* exclude kernel */ - pmi); + pmi, false, false); } static inline u8 fixed_en_pmi(u64 ctrl, int idx) @@ -408,7 +416,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) return 0; - if (!(data & 0xffffffff00200000ull)) { + if (!(data & pmu->reserved_bits)) { reprogram_gp_counter(pmc, data); return 0; } @@ -450,6 +458,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_GP] = 0; pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; + 
pmu->reserved_bits = 0xffffffff00200000ull; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) @@ -478,6 +487,12 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; + + entry = kvm_find_cpuid_entry(vcpu, 7, 0); + if (entry && + (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) && + (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) + pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; } void kvm_pmu_init(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 064d0be67ecc..57b4e129891a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -373,6 +373,7 @@ struct nested_vmx { * we must keep them pinned while L2 runs. */ struct page *apic_access_page; + u64 msr_ia32_feature_control; }; #define POSTED_INTR_ON 0 @@ -711,10 +712,10 @@ static void nested_release_page_clean(struct page *page) kvm_release_page_clean(page); } +static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); -static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); @@ -1039,12 +1040,16 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) (vmcs12->secondary_vm_exec_control & bit); } -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12, - struct kvm_vcpu *vcpu) +static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } +static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); +} + static inline bool is_exception(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) @@ -2155,6 +2160,7 @@ static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; static u32 nested_vmx_misc_low, nested_vmx_misc_high; +static u32 nested_vmx_ept_caps; static __init void nested_vmx_setup_ctls_msrs(void) { /* @@ -2190,14 +2196,17 @@ static __init void nested_vmx_setup_ctls_msrs(void) * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and * 17 must be 1. */ + rdmsr(MSR_IA32_VMX_EXIT_CTLS, + nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */ + nested_vmx_exit_ctls_high &= #ifdef CONFIG_X86_64 - nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE; -#else - nested_vmx_exit_ctls_high = 0; + VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif - nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; + VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; + nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | + VM_EXIT_LOAD_IA32_EFER); /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, @@ -2205,8 +2214,12 @@ static __init void nested_vmx_setup_ctls_msrs(void) /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. 
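The pmu.c change earlier in this hunk tracks which IA32_PERFEVTSELx bits a guest may set in pmu->reserved_bits; when the guest CPUID exposes HLE or RTM, the two Haswell TSX qualifier bits (IN_TX, bit 32, and IN_TXCP, bit 33) are cleared from that mask with the XOR shown above. A small sketch of the same bookkeeping; the helper name is invented.

	#include <linux/types.h>

	#define HSW_IN_TX		(1ULL << 32)
	#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

	static u64 guest_evtsel_reserved(bool tsx_in_guest_cpuid)
	{
		u64 reserved = 0xffffffff00200000ull;	/* bits 63-32 plus bit 21 */

		if (tsx_in_guest_cpuid)
			reserved ^= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;
		/* a wrmsr to the event selector is accepted when
		 * (data & reserved) == 0 */
		return reserved;
	}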
*/ nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_entry_ctls_high &= - VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE; - nested_vmx_entry_ctls_high |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; +#ifdef CONFIG_X86_64 + VM_ENTRY_IA32E_MODE | +#endif + VM_ENTRY_LOAD_IA32_PAT; + nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | + VM_ENTRY_LOAD_IA32_EFER); /* cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, @@ -2241,6 +2254,22 @@ static __init void nested_vmx_setup_ctls_msrs(void) SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_WBINVD_EXITING; + if (enable_ept) { + /* nested EPT: emulate EPT also to L1 */ + nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; + nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | + VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; + nested_vmx_ept_caps &= vmx_capability.ept; + /* + * Since invept is completely emulated we support both global + * and context invalidation independent of what host cpu + * supports + */ + nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | + VMX_EPT_EXTENT_CONTEXT_BIT; + } else + nested_vmx_ept_caps = 0; + /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK | @@ -2282,8 +2311,11 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) switch (msr_index) { case MSR_IA32_FEATURE_CONTROL: - *pdata = 0; - break; + if (nested_vmx_allowed(vcpu)) { + *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control; + break; + } + return 0; case MSR_IA32_VMX_BASIC: /* * This MSR reports some information about VMX support. We @@ -2346,8 +2378,8 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) nested_vmx_secondary_ctls_high); break; case MSR_IA32_VMX_EPT_VPID_CAP: - /* Currently, no nested ept or nested vpid */ - *pdata = 0; + /* Currently, no nested vpid support */ + *pdata = nested_vmx_ept_caps; break; default: return 0; @@ -2356,14 +2388,24 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) return 1; } -static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) +static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { + u32 msr_index = msr_info->index; + u64 data = msr_info->data; + bool host_initialized = msr_info->host_initiated; + if (!nested_vmx_allowed(vcpu)) return 0; - if (msr_index == MSR_IA32_FEATURE_CONTROL) - /* TODO: the right thing. */ + if (msr_index == MSR_IA32_FEATURE_CONTROL) { + if (!host_initialized && + to_vmx(vcpu)->nested.msr_ia32_feature_control + & FEATURE_CONTROL_LOCKED) + return 0; + to_vmx(vcpu)->nested.msr_ia32_feature_control = data; return 1; + } + /* * No need to treat VMX capability MSRs specially: If we don't handle * them, handle_wrmsr will #GP(0), which is correct (they are readonly) @@ -2494,7 +2536,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; /* Otherwise falls through */ default: - if (vmx_set_vmx_msr(vcpu, msr_index, data)) + if (vmx_set_vmx_msr(vcpu, msr_info)) break; msr = find_msr_entry(vmx, msr_index); if (msr) { @@ -5302,9 +5344,13 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) /* It is a write fault? */ error_code = exit_qualification & (1U << 1); + /* It is a fetch fault? */ + error_code |= (exit_qualification & (1U << 2)) << 2; /* ept page table is present? 
*/ error_code |= (exit_qualification >> 3) & 0x1; + vcpu->arch.exit_qualification = exit_qualification; + return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } @@ -5438,7 +5484,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); - if (err == EMULATE_DO_MMIO) { + if (err == EMULATE_USER_EXIT) { ret = 0; goto out; } @@ -5567,8 +5613,47 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) free_loaded_vmcs(&vmx->vmcs01); } +/* + * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), + * set the success or error code of an emulated VMX instruction, as specified + * by Vol 2B, VMX Instruction Reference, "Conventions". + */ +static void nested_vmx_succeed(struct kvm_vcpu *vcpu) +{ + vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); +} + +static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) +{ + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | + X86_EFLAGS_SF | X86_EFLAGS_OF)) + | X86_EFLAGS_CF); +} + static void nested_vmx_failValid(struct kvm_vcpu *vcpu, - u32 vm_instruction_error); + u32 vm_instruction_error) +{ + if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { + /* + * failValid writes the error number to the current VMCS, which + * can't be done there isn't a current VMCS. + */ + nested_vmx_failInvalid(vcpu); + return; + } + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_SF | X86_EFLAGS_OF)) + | X86_EFLAGS_ZF); + get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; + /* + * We don't need to force a shadow sync because + * VM_INSTRUCTION_ERROR is not shadowed + */ +} /* * Emulate the VMXON instruction. @@ -5583,6 +5668,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu) struct kvm_segment cs; struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; + const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED + | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; /* The Intel VMX Instruction Reference lists a bunch of bits that * are prerequisite to running VMXON, most notably cr4.VMXE must be @@ -5611,6 +5698,13 @@ static int handle_vmon(struct kvm_vcpu *vcpu) skip_emulated_instruction(vcpu); return 1; } + + if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES) + != VMXON_NEEDED_FEATURES) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if (enable_shadow_vmcs) { shadow_vmcs = alloc_vmcs(); if (!shadow_vmcs) @@ -5628,6 +5722,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) vmx->nested.vmxon = true; skip_emulated_instruction(vcpu); + nested_vmx_succeed(vcpu); return 1; } @@ -5712,6 +5807,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) return 1; free_nested(to_vmx(vcpu)); skip_emulated_instruction(vcpu); + nested_vmx_succeed(vcpu); return 1; } @@ -5768,48 +5864,6 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, return 0; } -/* - * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), - * set the success or error code of an emulated VMX instruction, as specified - * by Vol 2B, VMX Instruction Reference, "Conventions". 
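handle_vmon() above now refuses VMXON unless L1's IA32_FEATURE_CONTROL value has both the lock bit and the "VMXON outside SMX" bit set, matching what real hardware enforces for MSR 0x3a. A condensed sketch of that gate; the macro and function names are illustrative, not the kernel's.

	#include <linux/types.h>

	#define FC_LOCKED		(1ULL << 0)	/* IA32_FEATURE_CONTROL bit 0 */
	#define FC_VMXON_OUTSIDE_SMX	(1ULL << 2)	/* bit 2 */

	static bool vmxon_permitted(u64 feature_control)
	{
		u64 needed = FC_LOCKED | FC_VMXON_OUTSIDE_SMX;

		/* otherwise VMXON raises #GP(0), which is what the nested
		 * code injects into L1 */
		return (feature_control & needed) == needed;
	}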
- */ -static void nested_vmx_succeed(struct kvm_vcpu *vcpu) -{ - vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) - & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | - X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); -} - -static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) -{ - vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) - & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | - X86_EFLAGS_SF | X86_EFLAGS_OF)) - | X86_EFLAGS_CF); -} - -static void nested_vmx_failValid(struct kvm_vcpu *vcpu, - u32 vm_instruction_error) -{ - if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { - /* - * failValid writes the error number to the current VMCS, which - * can't be done there isn't a current VMCS. - */ - nested_vmx_failInvalid(vcpu); - return; - } - vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) - & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | - X86_EFLAGS_SF | X86_EFLAGS_OF)) - | X86_EFLAGS_ZF); - get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; - /* - * We don't need to force a shadow sync because - * VM_INSTRUCTION_ERROR is not shadowed - */ -} - /* Emulate the VMCLEAR instruction */ static int handle_vmclear(struct kvm_vcpu *vcpu) { @@ -5972,8 +6026,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; - unsigned long *fields = (unsigned long *)shadow_read_write_fields; - int num_fields = max_shadow_read_write_fields; + const unsigned long *fields = shadow_read_write_fields; + const int num_fields = max_shadow_read_write_fields; vmcs_load(shadow_vmcs); @@ -6002,12 +6056,11 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { - unsigned long *fields[] = { - (unsigned long *)shadow_read_write_fields, - (unsigned long *)shadow_read_only_fields + const unsigned long *fields[] = { + shadow_read_write_fields, + shadow_read_only_fields }; - int num_lists = ARRAY_SIZE(fields); - int max_fields[] = { + const int max_fields[] = { max_shadow_read_write_fields, max_shadow_read_only_fields }; @@ -6018,7 +6071,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) vmcs_load(shadow_vmcs); - for (q = 0; q < num_lists; q++) { + for (q = 0; q < ARRAY_SIZE(fields); q++) { for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); @@ -6248,6 +6301,74 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) return 1; } +/* Emulate the INVEPT instruction */ +static int handle_invept(struct kvm_vcpu *vcpu) +{ + u32 vmx_instruction_info, types; + unsigned long type; + gva_t gva; + struct x86_exception e; + struct { + u64 eptp, gpa; + } operand; + u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK; + + if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || + !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); + + types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; + + if (!(types & (1UL << type))) { + nested_vmx_failValid(vcpu, + VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + return 1; + } + + /* According to the Intel VMX instruction reference, the memory + * operand is read even if it isn't needed (e.g., for type==global) + */ + if 
(get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), + vmx_instruction_info, &gva)) + return 1; + if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, + sizeof(operand), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + + switch (type) { + case VMX_EPT_EXTENT_CONTEXT: + if ((operand.eptp & eptp_mask) != + (nested_ept_get_cr3(vcpu) & eptp_mask)) + break; + case VMX_EPT_EXTENT_GLOBAL: + kvm_mmu_sync_roots(vcpu); + kvm_mmu_flush_tlb(vcpu); + nested_vmx_succeed(vcpu); + break; + default: + BUG_ON(1); + break; + } + + skip_emulated_instruction(vcpu); + return 1; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -6292,6 +6413,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op, + [EXIT_REASON_INVEPT] = handle_invept, }; static const int kvm_vmx_max_exit_handlers = @@ -6518,6 +6640,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: + case EXIT_REASON_INVEPT: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! @@ -6550,7 +6673,20 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); case EXIT_REASON_EPT_VIOLATION: + /* + * L0 always deals with the EPT violation. If nested EPT is + * used, and the nested mmu code discovers that the address is + * missing in the guest EPT table (EPT12), the EPT violation + * will be injected with nested_ept_inject_page_fault() + */ + return 0; case EXIT_REASON_EPT_MISCONFIG: + /* + * L2 never uses directly L1's EPT, but rather L0's own EPT + * table (shadow on EPT) or a merged EPT table that L0 built + * (EPT on EPT). So any problems with the structure of the + * table is L0's fault. 
+ */ return 0; case EXIT_REASON_PREEMPTION_TIMER: return vmcs12->pin_based_vm_exec_control & @@ -6638,7 +6774,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( - get_vmcs12(vcpu), vcpu)))) { + get_vmcs12(vcpu))))) { if (vmx_interrupt_allowed(vcpu)) { vmx->soft_vnmi_blocked = 0; } else if (vmx->vnmi_blocked_time > 1000000000LL && @@ -7326,6 +7462,48 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) entry->ecx |= bit(X86_FEATURE_VMX); } +static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, + struct x86_exception *fault) +{ + struct vmcs12 *vmcs12; + nested_vmx_vmexit(vcpu); + vmcs12 = get_vmcs12(vcpu); + + if (fault->error_code & PFERR_RSVD_MASK) + vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; + else + vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION; + vmcs12->exit_qualification = vcpu->arch.exit_qualification; + vmcs12->guest_physical_address = fault->address; +} + +/* Callbacks for nested_ept_init_mmu_context: */ + +static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) +{ + /* return the page table to be shadowed - in our case, EPT12 */ + return get_vmcs12(vcpu)->ept_pointer; +} + +static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) +{ + int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu, + nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT); + + vcpu->arch.mmu.set_cr3 = vmx_set_cr3; + vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; + vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; + + vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; + + return r; +} + +static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) +{ + vcpu->arch.walk_mmu = &vcpu->arch.mmu; +} + /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it @@ -7388,7 +7566,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->guest_interruptibility_info); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); - vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags); + vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); @@ -7508,15 +7686,24 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); - /* Note: IA32_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */ - vmcs_write32(VM_EXIT_CONTROLS, - vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl); - vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls | + /* L2->L1 exit controls are emulated - the hardware exit is to L0 so + * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER + * bits are further modified by vmx_set_efer() below. + */ + vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); + + /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are + * emulated by vmx_set_efer(), below. 
+ */ + vmcs_write32(VM_ENTRY_CONTROLS, + (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & + ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); - if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); - else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) + vcpu->arch.pat = vmcs12->guest_ia32_pat; + } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); @@ -7538,6 +7725,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmx_flush_tlb(vcpu); } + if (nested_cpu_has_ept(vmcs12)) { + kvm_mmu_unload(vcpu); + nested_ept_init_mmu_context(vcpu); + } + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->guest_ia32_efer; else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) @@ -7565,6 +7757,16 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) kvm_set_cr3(vcpu, vmcs12->guest_cr3); kvm_mmu_reset_context(vcpu); + /* + * L1 may access the L2's PDPTR, so save them to construct vmcs12 + */ + if (enable_ept) { + vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); + vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); + vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); + vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); + } + kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); } @@ -7887,6 +8089,22 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); + /* + * In some cases (usually, nested EPT), L2 is allowed to change its + * own CR3 without exiting. If it has changed it, we must keep it. + * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined + * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. + * + * Additionally, restore L2's PDPTR to vmcs12. 
+ */ + if (enable_ept) { + vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3); + vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); + vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); + vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); + vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); + } + vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE); @@ -7948,6 +8166,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { + struct kvm_segment seg; + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) @@ -7982,7 +8202,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); - /* shadow page tables on either EPT or shadow page tables */ + if (nested_cpu_has_ept(vmcs12)) + nested_ept_uninit_mmu_context(vcpu); + kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); @@ -8001,23 +8223,61 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); - vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base); - vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base); - vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base); - vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector); - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector); - vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector); - vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector); - vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector); - vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector); - vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector); - - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) + + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); + vcpu->arch.pat = vmcs12->host_ia32_pat; + } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, vmcs12->host_ia32_perf_global_ctrl); + /* Set L1 segment info according to Intel SDM + 27.5.2 Loading Host Segment and Descriptor-Table Registers */ + seg = (struct kvm_segment) { + .base = 0, + .limit = 0xFFFFFFFF, + .selector = vmcs12->host_cs_selector, + .type = 11, + .present = 1, + .s = 1, + .g = 1 + }; + if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) + seg.l = 1; + else + seg.db = 1; + vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); + seg = (struct kvm_segment) { + .base = 0, + .limit = 0xFFFFFFFF, + .type = 3, + .present = 1, + .s = 1, + .db = 1, + .g = 1 + }; + seg.selector = vmcs12->host_ds_selector; + vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); + seg.selector = vmcs12->host_es_selector; + vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); + seg.selector = vmcs12->host_ss_selector; + vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); + seg.selector = vmcs12->host_fs_selector; + seg.base = vmcs12->host_fs_base; + vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); + seg.selector = vmcs12->host_gs_selector; + seg.base = vmcs12->host_gs_base; + vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); + seg = (struct kvm_segment) { + .base = vmcs12->host_tr_base, + .limit = 0x67, + .selector = 
vmcs12->host_tr_selector, + .type = 11, + .present = 1 + }; + vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); + kvm_set_dr(vcpu, 7, 0x400); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d21bce505315..668f19aee6ca 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -682,17 +682,6 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) */ } - /* - * Does the new cr3 value map to physical memory? (Note, we - * catch an invalid cr3 even in real-mode, because it would - * cause trouble later on when we turn on paging anyway.) - * - * A real CPU would silently accept an invalid cr3 and would - * attempt to use it - with largely undefined (and often hard - * to debug) behavior on the guest side. - */ - if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) - return 1; vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); vcpu->arch.mmu.new_cr3(vcpu); @@ -850,7 +839,8 @@ static u32 msrs_to_save[] = { #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif - MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA + MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, + MSR_IA32_FEATURE_CONTROL }; static unsigned num_msrs_to_save; @@ -4955,6 +4945,97 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, static int complete_emulated_mmio(struct kvm_vcpu *vcpu); static int complete_emulated_pio(struct kvm_vcpu *vcpu); +static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, + unsigned long *db) +{ + u32 dr6 = 0; + int i; + u32 enable, rwlen; + + enable = dr7; + rwlen = dr7 >> 16; + for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) + if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) + dr6 |= (1 << i); + return dr6; +} + +static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) +{ + struct kvm_run *kvm_run = vcpu->run; + + /* + * Use the "raw" value to see if TF was passed to the processor. + * Note that the new value of the flags has not been saved yet. + * + * This is correct even for TF set by the guest, because "the + * processor will not generate this exception after the instruction + * that sets the TF flag". + */ + unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); + + if (unlikely(rflags & X86_EFLAGS_TF)) { + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { + kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; + kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + *r = EMULATE_USER_EXIT; + } else { + vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; + /* + * "Certain debug exceptions may clear bit 0-3. The + * remaining contents of the DR6 register are never + * cleared by the processor". 
+ */ + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= DR6_BS; + kvm_queue_exception(vcpu, DB_VECTOR); + } + } +} + +static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) +{ + struct kvm_run *kvm_run = vcpu->run; + unsigned long eip = vcpu->arch.emulate_ctxt.eip; + u32 dr6 = 0; + + if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && + (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { + dr6 = kvm_vcpu_check_hw_bp(eip, 0, + vcpu->arch.guest_debug_dr7, + vcpu->arch.eff_db); + + if (dr6 != 0) { + kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; + kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + + get_segment_base(vcpu, VCPU_SREG_CS); + + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + *r = EMULATE_USER_EXIT; + return true; + } + } + + if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { + dr6 = kvm_vcpu_check_hw_bp(eip, 0, + vcpu->arch.dr7, + vcpu->arch.db); + + if (dr6 != 0) { + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= dr6; + kvm_queue_exception(vcpu, DB_VECTOR); + *r = EMULATE_DONE; + return true; + } + } + + return false; +} + int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, @@ -4975,6 +5056,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); + + /* + * We will reenter on the same instruction since + * we do not set complete_userspace_io. This does not + * handle watchpoints yet, those would be handled in + * the emulate_ops. + */ + if (kvm_vcpu_check_breakpoint(vcpu, &r)) + return r; + ctxt->interruptibility = 0; ctxt->have_exception = false; ctxt->perm_ok = false; @@ -5037,11 +5128,11 @@ restart: writeback = false; vcpu->arch.complete_userspace_io = complete_emulated_pio; } - r = EMULATE_DO_MMIO; + r = EMULATE_USER_EXIT; } else if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) writeback = false; - r = EMULATE_DO_MMIO; + r = EMULATE_USER_EXIT; vcpu->arch.complete_userspace_io = complete_emulated_mmio; } else if (r == EMULATION_RESTART) goto restart; @@ -5050,10 +5141,12 @@ restart: if (writeback) { toggle_interruptibility(vcpu, ctxt->interruptibility); - kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); + if (r == EMULATE_DONE) + kvm_vcpu_check_singlestep(vcpu, &r); + kvm_set_rflags(vcpu, ctxt->eflags); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; @@ -5347,7 +5440,7 @@ static struct notifier_block pvclock_gtod_notifier = { int kvm_arch_init(void *opaque) { int r; - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; + struct kvm_x86_ops *ops = opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); @@ -7019,6 +7112,15 @@ out_free: return -ENOMEM; } +void kvm_arch_memslots_updated(struct kvm *kvm) +{ + /* + * memslots->generation has been incremented. + * mmio generation may have reached its maximum value. + */ + kvm_mmu_invalidate_mmio_sptes(kvm); +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, @@ -7079,11 +7181,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, */ if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) kvm_mmu_slot_remove_write_access(kvm, mem->slot); - /* - * If memory slot is created, or moved, we need to clear all - * mmio sptes. 
- */ - kvm_mmu_invalidate_mmio_sptes(kvm); } void kvm_arch_flush_shadow_all(struct kvm *kvm) diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index d482bcaf61c1..6a22c19da663 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -882,9 +882,9 @@ int lguest_setup_irq(unsigned int irq) * It would be far better for everyone if the Guest had its own clock, but * until then the Host gives us the time on every interrupt. */ -static unsigned long lguest_get_wallclock(void) +static void lguest_get_wallclock(struct timespec *now) { - return lguest_data.time.tv_sec; + *now = lguest_data.time; } /* diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 252b8f5489ba..4500142bc4aa 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -1,6 +1,7 @@ #include <linux/highmem.h> #include <linux/module.h> #include <linux/swap.h> /* for totalram_pages */ +#include <linux/bootmem.h> void *kmap(struct page *page) { @@ -121,6 +122,11 @@ void __init set_highmem_pages_init(void) struct zone *zone; int nid; + /* + * Explicitly reset zone->managed_pages because set_highmem_pages_init() + * is invoked before free_all_bootmem() + */ + reset_all_zones_managed_pages(); for_each_zone(zone) { unsigned long zone_start_pfn, zone_end_pfn; diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 1f34e9219775..2ec29ac78ae6 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -494,7 +494,6 @@ int devmem_is_allowed(unsigned long pagenr) void free_init_pages(char *what, unsigned long begin, unsigned long end) { - unsigned long addr; unsigned long begin_aligned, end_aligned; /* Make sure boundaries are page aligned */ @@ -509,8 +508,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) if (begin >= end) return; - addr = begin; - /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will @@ -529,18 +526,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); - - for (; addr < end; addr += PAGE_SIZE) { - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - free_reserved_page(virt_to_page(addr)); - } + free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what); #endif } void free_initmem(void) { - free_init_pages("unused kernel memory", + free_init_pages("unused kernel", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } @@ -566,7 +558,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) * - relocate_initrd() * So here We can do PAGE_ALIGN() safely to get partial page to be freed */ - free_init_pages("initrd memory", start, PAGE_ALIGN(end)); + free_init_pages("initrd", start, PAGE_ALIGN(end)); } #endif diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 3ac7e319918d..4287f1ffba7e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -660,10 +660,8 @@ void __init initmem_init(void) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); - num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else - num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif @@ -671,7 +669,7 @@ void __init initmem_init(void) sparse_memory_present_with_active_regions(0); #ifdef 
CONFIG_FLATMEM - max_mapnr = num_physpages; + max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn; #endif __vmalloc_start_set = true; @@ -739,9 +737,6 @@ static void __init test_wp_bit(void) void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - int tmp; - pci_iommu_alloc(); #ifdef CONFIG_FLATMEM @@ -759,32 +754,11 @@ void __init mem_init(void) set_highmem_pages_init(); /* this will put all low memory onto the freelists */ - totalram_pages += free_all_bootmem(); - - reservedpages = 0; - for (tmp = 0; tmp < max_low_pfn; tmp++) - /* - * Only count reserved RAM pages: - */ - if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) - reservedpages++; + free_all_bootmem(); after_bootmem = 1; - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " - "%dk reserved, %dk data, %dk init, %ldk highmem)\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10, - totalhigh_pages << (PAGE_SHIFT-10)); - + mem_init_print_info(NULL); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index b3940b6b4d7e..104d56a9245f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -712,36 +712,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory); static void __meminit free_pagetable(struct page *page, int order) { - struct zone *zone; - bool bootmem = false; unsigned long magic; unsigned int nr_pages = 1 << order; /* bootmem page has reserved flag */ if (PageReserved(page)) { __ClearPageReserved(page); - bootmem = true; magic = (unsigned long)page->lru.next; if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { while (nr_pages--) put_page_bootmem(page++); } else - __free_pages_bootmem(page, order); + while (nr_pages--) + free_reserved_page(page++); } else free_pages((unsigned long)page_address(page), order); - - /* - * SECTION_INFO pages and MIX_SECTION_INFO pages - * are all allocated by bootmem. 
- */ - if (bootmem) { - zone = page_zone(page); - zone_span_writelock(zone); - zone->present_pages += nr_pages; - zone_span_writeunlock(zone); - totalram_pages += nr_pages; - } } static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) @@ -1058,9 +1044,6 @@ static void __init register_page_bootmem_info(void) void __init mem_init(void) { - long codesize, reservedpages, datasize, initsize; - unsigned long absent_pages; - pci_iommu_alloc(); /* clear_bss() already clear the empty_zero_page */ @@ -1068,29 +1051,14 @@ void __init mem_init(void) register_page_bootmem_info(); /* this will put all memory onto the freelists */ - totalram_pages = free_all_bootmem(); - - absent_pages = absent_pages_in_range(0, max_pfn); - reservedpages = max_pfn - totalram_pages - absent_pages; + free_all_bootmem(); after_bootmem = 1; - codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - /* Register memory areas for /proc/kcore */ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); - printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " - "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", - nr_free_pages() << (PAGE_SHIFT-10), - max_pfn << (PAGE_SHIFT-10), - codesize >> 10, - absent_pages << (PAGE_SHIFT-10), - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10); + mem_init_print_info(NULL); } #ifdef CONFIG_DEBUG_RODATA @@ -1166,11 +1134,10 @@ void mark_rodata_ro(void) set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif - free_init_pages("unused kernel memory", + free_init_pages("unused kernel", (unsigned long) __va(__pa_symbol(text_end)), (unsigned long) __va(__pa_symbol(rodata_start))); - - free_init_pages("unused kernel memory", + free_init_pages("unused kernel", (unsigned long) __va(__pa_symbol(rodata_end)), (unsigned long) __va(__pa_symbol(_sdata))); } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 845df6835f9f..62c29a5bfe26 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = mmap_legacy_base(); mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 73a6d7395bd3..0342d27ca798 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -83,10 +83,8 @@ void __init initmem_init(void) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); - num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else - num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif printk(KERN_NOTICE "%ldMB LOWMEM available.\n", diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 17fda6a8b3c2..dfa537a03be1 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -240,7 +240,6 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) { pud_t *pud; - unsigned long addr; int i; if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ @@ -248,8 +247,7 @@ static void pgd_prepopulate_pmd(struct 
mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) pud = pud_offset(pgd, 0); - for (addr = i = 0; i < PREALLOCATED_PMDS; - i++, pud++, addr += PUD_SIZE) { + for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) { pmd_t *pmd = pmds[i]; if (i >= KERNEL_PGD_BOUNDARY) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index f66b54086ce5..79c216aa0e2b 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -12,6 +12,7 @@ #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/if_vlan.h> +#include <linux/random.h> /* * Conventions : @@ -144,6 +145,39 @@ static int pkt_type_offset(void) return -1; } +struct bpf_binary_header { + unsigned int pages; + /* Note : for security reasons, bpf code will follow a randomly + * sized amount of int3 instructions + */ + u8 image[]; +}; + +static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, + u8 **image_ptr) +{ + unsigned int sz, hole; + struct bpf_binary_header *header; + + /* Most of BPF filters are really small, + * but if some of them fill a page, allow at least + * 128 extra bytes to insert a random section of int3 + */ + sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE); + header = module_alloc(sz); + if (!header) + return NULL; + + memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ + + header->pages = sz / PAGE_SIZE; + hole = sz - (proglen + sizeof(*header)); + + /* insert a random number of int3 instructions before BPF code */ + *image_ptr = &header->image[prandom_u32() % hole]; + return header; +} + void bpf_jit_compile(struct sk_filter *fp) { u8 temp[64]; @@ -153,6 +187,7 @@ void bpf_jit_compile(struct sk_filter *fp) int t_offset, f_offset; u8 t_op, f_op, seen = 0, pass; u8 *image = NULL; + struct bpf_binary_header *header = NULL; u8 *func; int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ unsigned int cleanup_addr; /* epilogue code offset */ @@ -693,7 +728,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; if (unlikely(proglen + ilen > oldproglen)) { pr_err("bpb_jit_compile fatal error\n"); kfree(addrs); - module_free(NULL, image); + module_free(NULL, header); return; } memcpy(image + proglen, temp, ilen); @@ -717,10 +752,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; break; } if (proglen == oldproglen) { - image = module_alloc(max_t(unsigned int, - proglen, - sizeof(struct work_struct))); - if (!image) + header = bpf_alloc_binary(proglen, &image); + if (!header) goto out; } oldproglen = proglen; @@ -730,7 +763,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; bpf_jit_dump(flen, proglen, pass, image); if (image) { - bpf_flush_icache(image, image + proglen); + bpf_flush_icache(header, image + proglen); + set_memory_ro((unsigned long)header, header->pages); fp->bpf_func = (void *)image; } out: @@ -738,20 +772,13 @@ out: return; } -static void jit_free_defer(struct work_struct *arg) -{ - module_free(NULL, arg); -} - -/* run from softirq, we must use a work_struct to call - * module_free() from process context - */ void bpf_jit_free(struct sk_filter *fp) { if (fp->bpf_func != sk_run_filter) { - struct work_struct *work = (struct work_struct *)fp->bpf_func; + unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; + struct bpf_binary_header *header = (void *)addr; - INIT_WORK(work, jit_free_defer); - schedule_work(work); + set_memory_rw(addr, header->pages); + module_free(NULL, header); } } diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 3e724256dbee..d641897a1f4e 100644 --- 
a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -324,14 +324,11 @@ setup_resource(struct acpi_resource *acpi_res, void *data) res->start = start; res->end = end; info->res_offset[info->res_num] = addr.translation_offset; + info->res_num++; - if (!pci_use_crs) { + if (!pci_use_crs) dev_printk(KERN_DEBUG, &info->bridge->dev, "host bridge window %pR (ignored)\n", res); - return AE_OK; - } - - info->res_num++; return AE_OK; } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index b410b71bdcf7..c8d5577044bb 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -274,8 +274,9 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm, return status; } -int efi_set_rtc_mmss(unsigned long nowtime) +int efi_set_rtc_mmss(const struct timespec *now) { + unsigned long nowtime = now->tv_sec; efi_status_t status; efi_time_t eft; efi_time_cap_t cap; @@ -310,7 +311,7 @@ int efi_set_rtc_mmss(unsigned long nowtime) return 0; } -unsigned long efi_get_time(void) +void efi_get_time(struct timespec *now) { efi_status_t status; efi_time_t eft; @@ -320,8 +321,9 @@ unsigned long efi_get_time(void) if (status != EFI_SUCCESS) pr_err("Oops: efitime: can't read time!\n"); - return mktime(eft.year, eft.month, eft.day, eft.hour, - eft.minute, eft.second); + now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour, + eft.minute, eft.second); + now->tv_nsec = 0; } /* diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index d62b0a3b5c14..5e355b134ba4 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c @@ -56,7 +56,7 @@ void vrtc_cmos_write(unsigned char val, unsigned char reg) } EXPORT_SYMBOL_GPL(vrtc_cmos_write); -unsigned long vrtc_get_time(void) +void vrtc_get_time(struct timespec *now) { u8 sec, min, hour, mday, mon; unsigned long flags; @@ -82,17 +82,18 @@ unsigned long vrtc_get_time(void) printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d " "mon: %d year: %d\n", sec, min, hour, mday, mon, year); - return mktime(year, mon, mday, hour, min, sec); + now->tv_sec = mktime(year, mon, mday, hour, min, sec); + now->tv_nsec = 0; } -int vrtc_set_mmss(unsigned long nowtime) +int vrtc_set_mmss(const struct timespec *now) { unsigned long flags; struct rtc_time tm; int year; int retval = 0; - rtc_time_to_tm(nowtime, &tm); + rtc_time_to_tm(now->tv_sec, &tm); if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { /* * tm.year is the number of years since 1900, and the @@ -110,7 +111,7 @@ int vrtc_set_mmss(unsigned long nowtime) } else { printk(KERN_ERR "%s: Invalid vRTC value: write of %lx to vRTC failed\n", - __FUNCTION__, nowtime); + __FUNCTION__, now->tv_sec); retval = -EINVAL; } return retval; diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index c74436e687bf..72074d528400 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -85,15 +85,18 @@ static notrace cycle_t vread_pvclock(int *mode) cycle_t ret; u64 last; u32 version; - u32 migrate_count; u8 flags; unsigned cpu, cpu1; /* - * When looping to get a consistent (time-info, tsc) pair, we - * also need to deal with the possibility we can switch vcpus, - * so make sure we always re-fetch time-info for the current vcpu. + * Note: hypervisor must guarantee that: + * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. + * 2. that per-CPU pvclock time info is updated if the + * underlying CPU changes. + * 3. that version is increased whenever underlying CPU + * changes. 
+ * */ do { cpu = __getcpu() & VGETCPU_CPU_MASK; @@ -104,8 +107,6 @@ static notrace cycle_t vread_pvclock(int *mode) pvti = get_pvti(cpu); - migrate_count = pvti->migrate_count; - version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); /* @@ -117,8 +118,7 @@ static notrace cycle_t vread_pvclock(int *mode) cpu1 = __getcpu() & VGETCPU_CPU_MASK; } while (unlikely(cpu != cpu1 || (pvti->pvti.version & 1) || - pvti->pvti.version != version || - pvti->migrate_count != migrate_count)); + pvti->pvti.version != version)); if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) *mode = VCLOCK_NONE; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index a690868be837..ee365895b06b 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -15,6 +15,7 @@ #include <linux/math64.h> #include <linux/gfp.h> #include <linux/slab.h> +#include <linux/pvclock_gtod.h> #include <asm/pvclock.h> #include <asm/xen/hypervisor.h> @@ -179,34 +180,56 @@ static void xen_read_wallclock(struct timespec *ts) put_cpu_var(xen_vcpu); } -static unsigned long xen_get_wallclock(void) +static void xen_get_wallclock(struct timespec *now) { - struct timespec ts; + xen_read_wallclock(now); +} - xen_read_wallclock(&ts); - return ts.tv_sec; +static int xen_set_wallclock(const struct timespec *now) +{ + return -1; } -static int xen_set_wallclock(unsigned long now) +static int xen_pvclock_gtod_notify(struct notifier_block *nb, + unsigned long was_set, void *priv) { + /* Protected by the calling core code serialization */ + static struct timespec next_sync; + struct xen_platform_op op; - int rc; + struct timespec now; - /* do nothing for domU */ - if (!xen_initial_domain()) - return -1; + now = __current_kernel_time(); + + /* + * We only take the expensive HV call when the clock was set + * or when the 11 minutes RTC synchronization time elapsed. + */ + if (!was_set && timespec_compare(&now, &next_sync) < 0) + return NOTIFY_OK; op.cmd = XENPF_settime; - op.u.settime.secs = now; - op.u.settime.nsecs = 0; + op.u.settime.secs = now.tv_sec; + op.u.settime.nsecs = now.tv_nsec; op.u.settime.system_time = xen_clocksource_read(); - rc = HYPERVISOR_dom0_op(&op); - WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now); + (void)HYPERVISOR_dom0_op(&op); - return rc; + /* + * Move the next drift compensation time 11 minutes + * ahead. That's emulating the sync_cmos_clock() update for + * the hardware RTC. + */ + next_sync = now; + next_sync.tv_sec += 11 * 60; + + return NOTIFY_OK; } +static struct notifier_block xen_pvclock_gtod_notifier = { + .notifier_call = xen_pvclock_gtod_notify, +}; + static struct clocksource xen_clocksource __read_mostly = { .name = "xen", .rating = 400, @@ -482,6 +505,9 @@ static void __init xen_time_init(void) xen_setup_runstate_info(cpu); xen_setup_timer(cpu); xen_setup_cpu_clockevents(); + + if (xen_initial_domain()) + pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); } void __init xen_init_time_ops(void) @@ -494,7 +520,9 @@ void __init xen_init_time_ops(void) x86_platform.calibrate_tsc = xen_tsc_khz; x86_platform.get_wallclock = xen_get_wallclock; - x86_platform.set_wallclock = xen_set_wallclock; + /* Dom0 uses the native method to set the hardware RTC. 
*/ + if (!xen_initial_domain()) + x86_platform.set_wallclock = xen_set_wallclock; } #ifdef CONFIG_XEN_PVHVM diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 0a1b95f81a32..7ea6451a3a33 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -6,10 +6,12 @@ config XTENSA select ARCH_WANT_FRAME_POINTERS select HAVE_IDE select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS select HAVE_GENERIC_HARDIRQS select VIRT_TO_BUS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES + select GENERIC_SCHED_CLOCK select MODULES_USE_ELF_RELA select GENERIC_PCI_IOMAP select ARCH_WANT_IPC_PARSE_VERSION @@ -17,6 +19,7 @@ config XTENSA select CLONE_BACKWARDS select IRQ_DOMAIN select HAVE_OPROFILE + select HAVE_FUNCTION_TRACER help Xtensa processors are 32-bit RISC machines designed by Tensilica primarily for embedded systems. These processors are both diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug index a34010e0e51c..af7da74d535f 100644 --- a/arch/xtensa/Kconfig.debug +++ b/arch/xtensa/Kconfig.debug @@ -2,6 +2,16 @@ menu "Kernel hacking" source "lib/Kconfig.debug" +config DEBUG_TLB_SANITY + bool "Debug TLB sanity" + depends on DEBUG_KERNEL + help + Enable this to turn on TLB sanity check on each entry to userspace. + This check can spot missing TLB invalidation/wrong PTE permissions/ + premature page freeing. + + If unsure, say N. + config LD_NO_RELAX bool "Disable linker relaxation" default n diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore new file mode 100644 index 000000000000..be7655998b26 --- /dev/null +++ b/arch/xtensa/boot/.gitignore @@ -0,0 +1,3 @@ +uImage +zImage.redboot +*.dtb diff --git a/arch/xtensa/boot/boot-elf/.gitignore b/arch/xtensa/boot/boot-elf/.gitignore new file mode 100644 index 000000000000..5ff8fbb8561b --- /dev/null +++ b/arch/xtensa/boot/boot-elf/.gitignore @@ -0,0 +1 @@ +boot.lds diff --git a/arch/xtensa/boot/lib/.gitignore b/arch/xtensa/boot/lib/.gitignore new file mode 100644 index 000000000000..1629a6167755 --- /dev/null +++ b/arch/xtensa/boot/lib/.gitignore @@ -0,0 +1,3 @@ +inffast.c +inflate.c +inftrees.c diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile index ad8952e8a07f..6868f2ca6af8 100644 --- a/arch/xtensa/boot/lib/Makefile +++ b/arch/xtensa/boot/lib/Makefile @@ -7,6 +7,13 @@ zlib := inffast.c inflate.c inftrees.c lib-y += $(zlib:.c=.o) zmem.o ccflags-y := -Ilib/zlib_inflate +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_inflate.o = -pg +CFLAGS_REMOVE_zmem.o = -pg +CFLAGS_REMOVE_inftrees.o = -pg +CFLAGS_REMOVE_inffast.o = -pg +endif + quiet_cmd_copy_zlib = COPY $@ cmd_copy_zlib = cat $< > $@ diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h index 0c25799facab..23392c5630ce 100644 --- a/arch/xtensa/include/asm/bootparam.h +++ b/arch/xtensa/include/asm/bootparam.h @@ -20,7 +20,7 @@ #define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/ #define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */ #define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ -#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */ +#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. 
*/ #define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ #define BP_TAG_FDT 0x1006 /* flat device tree addr */ diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index d9ab131bc1aa..370b26f38414 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -93,6 +93,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) /* * xchg_u32 diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h index 61fc5faeb46c..3899610c1dff 100644 --- a/arch/xtensa/include/asm/delay.h +++ b/arch/xtensa/include/asm/delay.h @@ -12,7 +12,7 @@ #ifndef _XTENSA_DELAY_H #define _XTENSA_DELAY_H -#include <asm/processor.h> +#include <asm/timex.h> #include <asm/param.h> extern unsigned long loops_per_jiffy; @@ -24,24 +24,17 @@ static inline void __delay(unsigned long loops) : "=r" (loops) : "0" (loops)); } -static __inline__ u32 xtensa_get_ccount(void) -{ - u32 ccount; - asm volatile ("rsr %0, ccount\n" : "=r" (ccount)); - return ccount; -} - /* For SMP/NUMA systems, change boot_cpu_data to something like * local_cpu_data->... where local_cpu_data points to the current * cpu. */ static __inline__ void udelay (unsigned long usecs) { - unsigned long start = xtensa_get_ccount(); + unsigned long start = get_ccount(); unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ)); /* Note: all variables are unsigned (can wrap around)! */ - while (((unsigned long)xtensa_get_ccount()) - start < cycles) + while (((unsigned long)get_ccount()) - start < cycles) ; } diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 36dc7a684397..73cc3f482304 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h @@ -13,6 +13,7 @@ #include <asm/processor.h> #define HAVE_ARCH_CALLER_ADDR +#ifndef __ASSEMBLY__ #define CALLER_ADDR0 ({ unsigned long a0, a1; \ __asm__ __volatile__ ( \ "mov %0, a0\n" \ @@ -24,10 +25,22 @@ extern unsigned long return_address(unsigned level); #define CALLER_ADDR1 return_address(1) #define CALLER_ADDR2 return_address(2) #define CALLER_ADDR3 return_address(3) -#else +#else /* CONFIG_FRAME_POINTER */ #define CALLER_ADDR1 (0) #define CALLER_ADDR2 (0) #define CALLER_ADDR3 (0) -#endif +#endif /* CONFIG_FRAME_POINTER */ +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_ADDR ((unsigned long)(_mcount)) +#define MCOUNT_INSN_SIZE 3 + +#ifndef __ASSEMBLY__ +extern void _mcount(void); +#define mcount _mcount +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FUNCTION_TRACER */ #endif /* _XTENSA_FTRACE_H */ diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 8f017eb309bd..0fdf5d043f82 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -5,7 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * Copyright (C) 2001 - 2007 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. */ #ifndef _XTENSA_PGTABLE_H @@ -64,41 +64,82 @@ * Virtual memory area. We keep a distance to other memory regions to be * on the safe side. We also use this area for cache aliasing. 
*/ - #define VMALLOC_START 0xC0000000 #define VMALLOC_END 0xC7FEFFFF #define TLBTEMP_BASE_1 0xC7FF0000 #define TLBTEMP_BASE_2 0xC7FF8000 /* - * Xtensa Linux config PTE layout (when present): - * 31-12: PPN - * 11-6: Software - * 5-4: RING - * 3-0: CA + * For the Xtensa architecture, the PTE layout is as follows: + * + * 31------12 11 10-9 8-6 5-4 3-2 1-0 + * +-----------------------------------------+ + * | | Software | HARDWARE | + * | PPN | ADW | RI |Attribute| + * +-----------------------------------------+ + * pte_none | MBZ | 01 | 11 | 00 | + * +-----------------------------------------+ + * present | PPN | 0 | 00 | ADW | RI | CA | wx | + * +- - - - - - - - - - - - - - - - - - - - -+ + * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 | + * +-----------------------------------------+ + * swap | index | type | 01 | 11 | 00 | + * +- - - - - - - - - - - - - - - - - - - - -+ + * file | file offset | 01 | 11 | 10 | + * +-----------------------------------------+ + * + * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE) + * +-----------------------------------------+ + * present | PPN | 0 | 00 | ADW | RI | CA | w1 | + * +-----------------------------------------+ + * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 | + * +-----------------------------------------+ * - * Similar to the Alpha and MIPS ports, we need to keep track of the ref - * and mod bits in software. We have a software "you can read - * from this page" bit, and a hardware one which actually lets the - * process read from the page. On the same token we have a software - * writable bit and the real hardware one which actually lets the - * process write to the page. + * Legend: + * PPN Physical Page Number + * ADW software: accessed (young) / dirty / writable + * RI ring (0=privileged, 1=user, 2 and 3 are unused) + * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough + * (11 is invalid and used to mark pages that are not present) + * w page is writable (hw) + * x page is executable (hw) + * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB) + * (note that the index is always non-zero) + * type swap type (5 bits -> 32 types) + * file offset 26-bit offset into the file, in increments of PAGE_SIZE * - * See further below for PTE layout for swapped-out pages. + * Notes: + * - (PROT_NONE) is a special case of 'present' but causes an exception for + * any access (read, write, and execute). + * - 'multihit-exception' has the highest priority of all MMU exceptions, + * so the ring must be set to 'RING_USER' even for 'non-present' pages. + * - on older hardware, the exectuable flag was not supported and + * used as a 'valid' flag, so it needs to be always set. + * - we need to keep track of certain flags in software (dirty and young) + * to do this, we use write exceptions and have a separate software w-flag. 
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved */ +#define _PAGE_ATTRIB_MASK 0xf + #define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */ #define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */ -#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */ -#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */ - -/* None of these cache modes include MP coherency: */ #define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */ #define _PAGE_CA_WB (1<<2) /* write-back */ #define _PAGE_CA_WT (2<<2) /* write-through */ #define _PAGE_CA_MASK (3<<2) -#define _PAGE_INVALID (3<<2) +#define _PAGE_CA_INVALID (3<<2) + +/* We use invalid attribute values to distinguish special pte entries */ +#if XCHAL_HW_VERSION_MAJOR < 2000 +#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */ +#define _PAGE_NONE 0x04 +#else +#define _PAGE_HW_VALID 0x00 +#define _PAGE_NONE 0x0f +#endif +#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */ #define _PAGE_USER (1<<4) /* user access (ring=1) */ @@ -108,19 +149,12 @@ #define _PAGE_DIRTY (1<<7) /* software: page dirty */ #define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ -/* On older HW revisions, we always have to set bit 0 */ -#if XCHAL_HW_VERSION_MAJOR < 2000 -# define _PAGE_VALID (1<<0) -#else -# define _PAGE_VALID 0 -#endif - -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED) - #ifdef CONFIG_MMU -#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED) + +#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER) #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER) @@ -132,9 +166,9 @@ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) #if (DCACHE_WAY_SIZE > PAGE_SIZE) -# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED) +# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS) #else -# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB) +# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB) #endif #else /* no mmu */ @@ -202,12 +236,16 @@ static inline void pgtable_cache_init(void) { } /* * pte status. */ -#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID) -#define pte_present(pte) \ - (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \ - || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE)) +# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER)) +#if XCHAL_HW_VERSION_MAJOR < 2000 +# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) +#else +# define pte_present(pte) \ + (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \ + || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE)) +#endif #define pte_clear(mm,addr,ptep) \ - do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0) + do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) @@ -328,35 +366,23 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) /* - * Encode and decode a swap entry. 
- * - * Format of swap pte: - * bit 0 MBZ - * bit 1 page-file (must be zero) - * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID) - * bits 4 - 5 ring protection (must be 01: _PAGE_USER) - * bits 6 - 10 swap type (5 bits -> 32 types) - * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB) - - * Format of file pte: - * bit 0 MBZ - * bit 1 page-file (must be one: _PAGE_FILE) - * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID) - * bits 4 - 5 ring protection (must be 01: _PAGE_USER) - * bits 6 - 31 file offset / PAGE_SIZE + * Encode and decode a swap and file entry. */ +#define SWP_TYPE_BITS 5 +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) #define __swp_type(entry) (((entry).val >> 6) & 0x1f) #define __swp_offset(entry) ((entry).val >> 11) #define __swp_entry(type,offs) \ - ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID}) + ((swp_entry_t){((type) << 6) | ((offs) << 11) | \ + _PAGE_CA_INVALID | _PAGE_USER}) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define PTE_FILE_MAX_BITS 28 -#define pte_to_pgoff(pte) (pte_val(pte) >> 4) +#define PTE_FILE_MAX_BITS 26 +#define pte_to_pgoff(pte) (pte_val(pte) >> 6) #define pgoff_to_pte(off) \ - ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE }) + ((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER }) #endif /* !defined (__ASSEMBLY__) */ diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h index ec098b68fb9a..32e98f27ce97 100644 --- a/arch/xtensa/include/asm/platform.h +++ b/arch/xtensa/include/asm/platform.h @@ -30,11 +30,6 @@ extern void platform_init(bp_tag_t*); extern void platform_setup (char **); /* - * platform_init_irq is called from init_IRQ. - */ -extern void platform_init_irq (void); - -/* * platform_restart is called to restart the system. */ extern void platform_restart (void); diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h index 3d35e5d0367e..69f901713fb6 100644 --- a/arch/xtensa/include/asm/timex.h +++ b/arch/xtensa/include/asm/timex.h @@ -35,19 +35,11 @@ # error "Bad timer number for Linux configurations!" 
#endif -#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT) - -#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */ -#define CLOCK_TICK_FACTOR 20 /* Factor of both 10^6 and CLOCK_TICK_RATE */ - #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT -extern unsigned long ccount_per_jiffy; -extern unsigned long nsec_per_ccount; -#define CCOUNT_PER_JIFFY ccount_per_jiffy -#define NSEC_PER_CCOUNT nsec_per_ccount +extern unsigned long ccount_freq; +#define CCOUNT_PER_JIFFY (ccount_freq / HZ) #else #define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) -#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK) #endif diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index a8f44f50e651..c114483010c1 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -85,4 +85,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 + #endif /* _XTENSA_SOCKET_H */ diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore new file mode 100644 index 000000000000..c5f676c3c224 --- /dev/null +++ b/arch/xtensa/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index 1e7fc87a94bb..f90265ec1ccc 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -11,6 +11,7 @@ obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \ obj-$(CONFIG_KGDB) += xtensa-stub.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o AFLAGS_head.o += -mtext-section-literals diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 5082507d5631..9298742f0fd0 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -458,7 +458,7 @@ common_exception_return: _bbsi.l a4, TIF_NEED_RESCHED, 3f _bbsi.l a4, TIF_NOTIFY_RESUME, 2f - _bbci.l a4, TIF_SIGPENDING, 4f + _bbci.l a4, TIF_SIGPENDING, 5f 2: l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f @@ -476,6 +476,13 @@ common_exception_return: callx4 a4 j 1b +5: +#ifdef CONFIG_DEBUG_TLB_SANITY + l32i a4, a1, PT_DEPC + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f + movi a4, check_tlb_sanity + callx4 a4 +#endif 4: /* Restore optional registers. */ load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT @@ -1792,10 +1799,15 @@ ENTRY(fast_store_prohibited) l32i a0, a0, 0 beqz a0, 2f - /* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/ + /* + * Note that we test _PAGE_WRITABLE_BIT only if PTE is present + * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. 
+ */ _PTE_OFFSET(a0, a1, a4) l32i a4, a0, 0 # read pteval + movi a1, _PAGE_CA_INVALID + ball a4, a1, 2f bbci.l a4, _PAGE_WRITABLE_BIT, 2f movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index ef12c0e6fa25..7d740ebbe198 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -68,6 +68,15 @@ _SetupMMU: #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + rsr a2, excsave1 + movi a3, 0x08000000 + bgeu a2, a3, 1f + movi a3, 0xd0000000 + add a2, a2, a3 + wsr a2, excsave1 +1: +#endif #endif .end no-absolute-literals diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S new file mode 100644 index 000000000000..0eeda2e4a25e --- /dev/null +++ b/arch/xtensa/kernel/mcount.S @@ -0,0 +1,50 @@ +/* + * arch/xtensa/kernel/mcount.S + * + * Xtensa specific mcount support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Tensilica Inc. + */ + +#include <linux/linkage.h> +#include <asm/ftrace.h> + +/* + * Entry condition: + * + * a2: a0 of the caller + */ + +ENTRY(_mcount) + + entry a1, 16 + + movi a4, ftrace_trace_function + l32i a4, a4, 0 + movi a3, ftrace_stub + bne a3, a4, 1f + retw + +1: xor a7, a2, a1 + movi a3, 0x3fffffff + and a7, a7, a3 + xor a7, a7, a1 + + xor a6, a0, a1 + and a6, a6, a3 + xor a6, a6, a1 + addi a6, a6, -MCOUNT_INSN_SIZE + callx4 a4 + + retw + +ENDPROC(_mcount) + +ENTRY(ftrace_stub) + entry a1, 16 + retw +ENDPROC(ftrace_stub) diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 126c18839409..5b3403388d7f 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -77,9 +77,9 @@ pcibios_align_resource(void *data, const struct resource *res, if (res->flags & IORESOURCE_IO) { if (size > 0x100) { - printk(KERN_ERR "PCI: I/O Region %s/%d too large" - " (%ld bytes)\n", pci_name(dev), - dev->resource - res, size); + pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n", + pci_name(dev), dev->resource - res, + size); } if (start & 0x300) @@ -174,7 +174,7 @@ static int __init pcibios_init(void) struct pci_controller *pci_ctrl; struct list_head resources; struct pci_bus *bus; - int next_busno = 0, i; + int next_busno = 0; printk("PCI: Probing PCI hardware\n"); @@ -197,7 +197,7 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); -void __init pcibios_fixup_bus(struct pci_bus *bus) +void pcibios_fixup_bus(struct pci_bus *bus) { if (bus->parent) { /* This is a subordinate bridge */ diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c index 2bd6c351f37c..1cf008284dd2 100644 --- a/arch/xtensa/kernel/platform.c +++ b/arch/xtensa/kernel/platform.c @@ -29,7 +29,6 @@ */ _F(void, setup, (char** cmd), { }); -_F(void, init_irq, (void), { }); _F(void, restart, (void), { while(1); }); _F(void, halt, (void), { while(1); }); _F(void, power_off, (void), { while(1); }); @@ -42,6 +41,6 @@ _F(void, pcibios_init, (void), { }); _F(void, calibrate_ccount, (void), { pr_err("ERROR: Cannot calibrate cpu frequency! 
Assuming 10MHz.\n"); - ccount_per_jiffy = 10 * (1000000UL/HZ); + ccount_freq = 10 * 1000000UL; }); #endif diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 6dd25ecde3f5..42a8bba0b0ea 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -152,8 +152,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) { meminfo_t* mi; mi = (meminfo_t*)(tag->data); - initrd_start = (void*)(mi->start); - initrd_end = (void*)(mi->end); + initrd_start = __va(mi->start); + initrd_end = __va(mi->end); return 0; } @@ -164,7 +164,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd); static int __init parse_tag_fdt(const bp_tag_t *tag) { - dtb_start = (void *)(tag->data[0]); + dtb_start = __va(tag->data[0]); return 0; } @@ -256,7 +256,7 @@ void __init early_init_devtree(void *params) static void __init copy_devtree(void) { void *alloc = early_init_dt_alloc_memory_arch( - be32_to_cpu(initial_boot_params->totalsize), 0); + be32_to_cpu(initial_boot_params->totalsize), 8); if (alloc) { memcpy(alloc, initial_boot_params, be32_to_cpu(initial_boot_params->totalsize)); diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index ffb474104311..bdbb17312526 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -16,6 +16,7 @@ #include <linux/sched.h> #include <linux/time.h> #include <linux/clocksource.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> @@ -23,13 +24,13 @@ #include <linux/profile.h> #include <linux/delay.h> #include <linux/irqdomain.h> +#include <linux/sched_clock.h> #include <asm/timex.h> #include <asm/platform.h> #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT -unsigned long ccount_per_jiffy; /* per 1/HZ */ -unsigned long nsec_per_ccount; /* nsec per ccount increment */ +unsigned long ccount_freq; /* ccount Hz */ #endif static cycle_t ccount_read(struct clocksource *cs) @@ -37,6 +38,11 @@ static cycle_t ccount_read(struct clocksource *cs) return (cycle_t)get_ccount(); } +static u32 notrace ccount_sched_clock_read(void) +{ + return get_ccount(); +} + static struct clocksource ccount_clocksource = { .name = "ccount", .rating = 200, @@ -44,29 +50,98 @@ static struct clocksource ccount_clocksource = { .mask = CLOCKSOURCE_MASK(32), }; +static int ccount_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev); +static void ccount_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt); +static struct ccount_timer_t { + struct clock_event_device evt; + int irq_enabled; +} ccount_timer = { + .evt = { + .name = "ccount_clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = ccount_timer_set_next_event, + .set_mode = ccount_timer_set_mode, + }, +}; + +static int ccount_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + unsigned long flags, next; + int ret = 0; + + local_irq_save(flags); + next = get_ccount() + delta; + set_linux_timer(next); + if (next - get_ccount() > delta) + ret = -ETIME; + local_irq_restore(flags); + + return ret; +} + +static void ccount_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + struct ccount_timer_t *timer = + container_of(evt, struct ccount_timer_t, evt); + + /* + * There is no way to disable the timer interrupt at the device level, + * only at the intenable register itself. Since enable_irq/disable_irq + * calls are nested, we need to make sure that these calls are + * balanced. 
+ */ + switch (mode) { + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + if (timer->irq_enabled) { + disable_irq(evt->irq); + timer->irq_enabled = 0; + } + break; + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_ONESHOT: + if (!timer->irq_enabled) { + enable_irq(evt->irq); + timer->irq_enabled = 1; + } + default: + break; + } +} + static irqreturn_t timer_interrupt(int irq, void *dev_id); static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, + .flags = IRQF_TIMER, .name = "timer", + .dev_id = &ccount_timer, }; void __init time_init(void) { - unsigned int irq; #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT printk("Calibrating CPU frequency "); platform_calibrate_ccount(); - printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), - (int)(ccount_per_jiffy/(10000/HZ))%100); + printk("%d.%02d MHz\n", (int)ccount_freq/1000000, + (int)(ccount_freq/10000)%100); #endif clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ); - /* Initialize the linux timer interrupt. */ + ccount_timer.evt.cpumask = cpumask_of(0); + ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT); + if (WARN(!ccount_timer.evt.irq, "error: can't map timer irq")) + return; + clockevents_config_and_register(&ccount_timer.evt, ccount_freq, 0xf, + 0xffffffff); + setup_irq(ccount_timer.evt.irq, &timer_irqaction); + ccount_timer.irq_enabled = 1; - irq = irq_create_mapping(NULL, LINUX_TIMER_INT); - setup_irq(irq, &timer_irqaction); - set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY); + setup_sched_clock(ccount_sched_clock_read, 32, ccount_freq); } /* @@ -75,36 +150,14 @@ void __init time_init(void) irqreturn_t timer_interrupt (int irq, void *dev_id) { + struct ccount_timer_t *timer = dev_id; + struct clock_event_device *evt = &timer->evt; - unsigned long next; - - next = get_linux_timer(); - -again: - while ((signed long)(get_ccount() - next) > 0) { - - profile_tick(CPU_PROFILING); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - - xtime_update(1); /* Linux handler in kernel/time/timekeeping */ - - /* Note that writing CCOMPARE clears the interrupt. */ - - next += CCOUNT_PER_JIFFY; - set_linux_timer(next); - } + evt->event_handler(evt); /* Allow platform to do something useful (Wdog). */ - platform_heartbeat(); - /* Make sure we didn't miss any tick... 
*/ - - if ((signed long)(get_ccount() - next) > 0) - goto again; - return IRQ_HANDLED; } diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 42c53c87c204..d8507f812f46 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -124,3 +124,7 @@ extern long common_exception_return; extern long _spill_registers; EXPORT_SYMBOL(common_exception_return); EXPORT_SYMBOL(_spill_registers); + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index bba125b4bb06..479d7537a32a 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -173,39 +173,16 @@ void __init zones_init(void) void __init mem_init(void) { - unsigned long codesize, reservedpages, datasize, initsize; - unsigned long highmemsize, tmp, ram; - - max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET; + max_mapnr = max_low_pfn - ARCH_PFN_OFFSET; high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - highmemsize = 0; #ifdef CONFIG_HIGHMEM #error HIGHGMEM not implemented in init.c #endif - totalram_pages += free_all_bootmem(); - - reservedpages = ram = 0; - for (tmp = 0; tmp < max_mapnr; tmp++) { - ram++; - if (PageReserved(mem_map+tmp)) - reservedpages++; - } + free_all_bootmem(); - codesize = (unsigned long) _etext - (unsigned long) _stext; - datasize = (unsigned long) _edata - (unsigned long) _sdata; - initsize = (unsigned long) __init_end - (unsigned long) __init_begin; - - printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " - "%ldk data, %ldk init %ldk highmem)\n", - nr_free_pages() << (PAGE_SHIFT-10), - ram << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - initsize >> 10, - highmemsize >> 10); + mem_init_print_info(NULL); } #ifdef CONFIG_BLK_DEV_INITRD @@ -214,11 +191,11 @@ extern int initrd_is_mapped; void free_initrd_mem(unsigned long start, unsigned long end) { if (initrd_is_mapped) - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif void free_initmem(void) { - free_initmem_default(0); + free_initmem_default(-1); } diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 5411aa67c68e..ca9d2366bf12 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -64,7 +64,7 @@ void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) { unsigned long flags; - local_save_flags(flags); + local_irq_save(flags); __get_new_mmu_context(mm); __load_mmu_context(mm); local_irq_restore(flags); @@ -94,7 +94,7 @@ void flush_tlb_range (struct vm_area_struct *vma, printk("[tlbrange<%02lx,%08lx,%08lx>]\n", (unsigned long)mm->context, start, end); #endif - local_save_flags(flags); + local_irq_save(flags); if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { int oldpid = get_rasid_register(); @@ -128,9 +128,10 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) if(mm->context == NO_CONTEXT) return; - local_save_flags(flags); + local_irq_save(flags); oldpid = get_rasid_register(); + set_rasid_register(ASID_INSERT(mm->context)); if (vma->vm_flags & VM_EXEC) invalidate_itlb_mapping(page); @@ -140,3 +141,116 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) local_irq_restore(flags); } + +#ifdef CONFIG_DEBUG_TLB_SANITY + +static unsigned get_pte_for_vaddr(unsigned vaddr) +{ + struct task_struct *task = get_current(); + struct mm_struct *mm = task->mm; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + if 
(!mm) + mm = task->active_mm; + pgd = pgd_offset(mm, vaddr); + if (pgd_none_or_clear_bad(pgd)) + return 0; + pmd = pmd_offset(pgd, vaddr); + if (pmd_none_or_clear_bad(pmd)) + return 0; + pte = pte_offset_map(pmd, vaddr); + if (!pte) + return 0; + return pte_val(*pte); +} + +enum { + TLB_SUSPICIOUS = 1, + TLB_INSANE = 2, +}; + +static void tlb_insane(void) +{ + BUG_ON(1); +} + +static void tlb_suspicious(void) +{ + WARN_ON(1); +} + +/* + * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE), + * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE. + * + * Check that valid TLB entries either have the same PA as the PTE, or PTE is + * marked as non-present. Non-present PTE and the page with non-zero refcount + * and zero mapcount is normal for batched TLB flush operation. Zero refcount + * means that the page was freed prematurely. Non-zero mapcount is unusual, + * but does not necessary means an error, thus marked as suspicious. + */ +static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) +{ + unsigned tlbidx = w | (e << PAGE_SHIFT); + unsigned r0 = dtlb ? + read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx); + unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); + unsigned pte = get_pte_for_vaddr(vpn); + unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK; + unsigned tlb_asid = r0 & ASID_MASK; + bool kernel = tlb_asid == 1; + int rc = 0; + + if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) { + pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n", + dtlb ? 'D' : 'I', w, e, vpn, + kernel ? "kernel" : "user"); + rc |= TLB_INSANE; + } + + if (tlb_asid == mm_asid) { + unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) : + read_itlb_translation(tlbidx); + if ((pte ^ r1) & PAGE_MASK) { + pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n", + dtlb ? 
'D' : 'I', w, e, r0, r1, pte); + if (pte == 0 || !pte_present(__pte(pte))) { + struct page *p = pfn_to_page(r1 >> PAGE_SHIFT); + pr_err("page refcount: %d, mapcount: %d\n", + page_count(p), + page_mapcount(p)); + if (!page_count(p)) + rc |= TLB_INSANE; + else if (page_mapped(p)) + rc |= TLB_SUSPICIOUS; + } else { + rc |= TLB_INSANE; + } + } + } + return rc; +} + +void check_tlb_sanity(void) +{ + unsigned long flags; + unsigned w, e; + int bug = 0; + + local_irq_save(flags); + for (w = 0; w < DTLB_ARF_WAYS; ++w) + for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e) + bug |= check_tlb_entry(w, e, true); + for (w = 0; w < ITLB_ARF_WAYS; ++w) + for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e) + bug |= check_tlb_entry(w, e, false); + if (bug & TLB_INSANE) + tlb_insane(); + if (bug & TLB_SUSPICIOUS) + tlb_suspicious(); + local_irq_restore(flags); +} + +#endif /* CONFIG_DEBUG_TLB_SANITY */ diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 7d0fea6d7f20..56f88b7afe2f 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -700,7 +700,7 @@ struct iss_net_init { #define ERR KERN_ERR "iss_net_setup: " -static int iss_net_setup(char *str) +static int __init iss_net_setup(char *str) { struct iss_net_private *device = NULL; struct iss_net_init *new; diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index c0edb35424ce..8c6e819cd8ed 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -108,13 +108,13 @@ static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio) sector_t sector = bio->bi_sector; bio_for_each_segment(bvec, bio, i) { - char *buffer = __bio_kmap_atomic(bio, i, KM_USER0); + char *buffer = __bio_kmap_atomic(bio, i); unsigned len = bvec->bv_len >> SECTOR_SHIFT; simdisk_transfer(dev, sector, len, buffer, bio_data_dir(bio) == WRITE); sector += len; - __bio_kunmap_atomic(bio, KM_USER0); + __bio_kunmap_atomic(bio); } return 0; } diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 96ef8eeb064e..74bb74fa3f87 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -163,7 +163,7 @@ void platform_heartbeat(void) #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT -void platform_calibrate_ccount(void) +void __init platform_calibrate_ccount(void) { long clk_freq = 0; #ifdef CONFIG_OF @@ -179,8 +179,7 @@ void platform_calibrate_ccount(void) if (!clk_freq) clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR; - ccount_per_jiffy = clk_freq / HZ; - nsec_per_ccount = 1000000000UL / clk_freq; + ccount_freq = clk_freq; } #endif diff --git a/arch/xtensa/variants/s6000/delay.c b/arch/xtensa/variants/s6000/delay.c index 54b2b573f166..39154563ee17 100644 --- a/arch/xtensa/variants/s6000/delay.c +++ b/arch/xtensa/variants/s6000/delay.c @@ -1,4 +1,3 @@ -#include <asm/delay.h> #include <asm/timex.h> #include <asm/io.h> #include <variant/hardware.h> @@ -17,11 +16,10 @@ void platform_calibrate_ccount(void) "1: l32i %0, %2, 0 ;" " beq %0, %1, 1b ;" : "=&a"(u) : "a"(t), "a"(tstamp)); - b = xtensa_get_ccount(); + b = get_ccount(); if (i == LOOPS) a = b; } while (--i >= 0); b -= a; - nsec_per_ccount = (LOOPS * 10000) / b; - ccount_per_jiffy = b * (100000UL / (LOOPS * HZ)); + ccount_freq = b * (100000UL / LOOPS); } |
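
Note on the new arch/xtensa/kernel/mcount.S above: the stub loads the global ftrace_trace_function pointer, compares it against ftrace_stub, and returns straight away when no tracer is registered; only on the slow path does it recover the two return addresses (masking the register-window bits and backing up by MCOUNT_INSN_SIZE to reach the call site) and call through the pointer. The C sketch below models only that dispatch shape, assuming nothing beyond what the assembly shows; trace_fn, trace_stub, print_tracer and the MCOUNT_INSN_SIZE value are illustrative stand-ins, not the real ftrace interface, and the window-bit masking is omitted.

/* Standalone sketch of the _mcount dispatch pattern from mcount.S:
 * call through a hook pointer only when it differs from the stub.
 * All names here are illustrative, not the real ftrace interface. */
#include <stdio.h>

#define MCOUNT_INSN_SIZE 3   /* size of the call instruction; assumption for the demo */

typedef void (*trace_fn_t)(unsigned long ip, unsigned long parent_ip);

static void trace_stub(unsigned long ip, unsigned long parent_ip) { }

/* Defaults to the stub, the same way ftrace_trace_function does. */
static trace_fn_t trace_fn = trace_stub;

static void print_tracer(unsigned long ip, unsigned long parent_ip)
{
        printf("traced call: %#lx called from %#lx\n", ip, parent_ip);
}

/* What the assembly stub does on every instrumented call. */
static void mcount_dispatch(unsigned long return_addr, unsigned long caller_ra)
{
        if (trace_fn == trace_stub)
                return;                         /* fast path: no tracer installed */
        /* back up from the return address to the call site itself */
        trace_fn(return_addr - MCOUNT_INSN_SIZE, caller_ra);
}

int main(void)
{
        mcount_dispatch(0x1000, 0x2000);        /* stub installed: no output */
        trace_fn = print_tracer;
        mcount_dispatch(0x1000, 0x2000);        /* tracer installed: prints */
        return 0;
}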
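
Note on ccount_timer_set_next_event() in the arch/xtensa/kernel/time.c hunk: CCOMPARE is programmed to "now + delta", then CCOUNT is read again; if more than delta ticks have already elapsed since the first read, the compare value is behind the counter, the interrupt will not fire, and -ETIME tells the clockevents core to retry with a larger delta. Below is a standalone sketch of that program-then-verify pattern with the hardware counter simulated; fake_ccount, fake_ccompare, ticks_per_read and set_timer are inventions for the demo, not kernel API.

/* Sketch of the "program, then check for a missed deadline" pattern
 * used by ccount_timer_set_next_event(). The counter is simulated. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_ccount;        /* stands in for the CCOUNT register */
static uint32_t fake_ccompare;      /* stands in for CCOMPARE (the deadline) */
static uint32_t ticks_per_read;     /* simulated time passing on each read */

static uint32_t get_ccount(void)
{
        fake_ccount += ticks_per_read;
        return fake_ccount;
}

static void set_timer(uint32_t v)
{
        fake_ccompare = v;
}

/* Same shape as ccount_timer_set_next_event(): program the compare value,
 * then verify the deadline is still ahead of the counter. */
static int set_next_event(uint32_t delta)
{
        uint32_t next = get_ccount() + delta;

        set_timer(next);
        /* Unsigned wrap-around math: if more than delta ticks passed since
         * the first read, 'next' is already behind the counter. */
        if (next - get_ccount() > delta)
                return -ETIME;
        return 0;
}

int main(void)
{
        ticks_per_read = 1;             /* counter barely moves: deadline holds */
        printf("on time: %d\n", set_next_event(50));    /* prints 0 */

        ticks_per_read = 100;           /* counter races past a short delta */
        printf("missed:  %d\n", set_next_event(50));    /* prints -ETIME */
        return 0;
}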
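
Note on the setup_sched_clock(ccount_sched_clock_read, 32, ccount_freq) call added to time_init(): the sched_clock core converts raw 32-bit CCOUNT deltas to nanoseconds with a multiply-and-shift, choosing mult and shift so that mult / 2^shift approximates 10^9 / freq. The sketch below shows only that arithmetic; the fixed shift of 24 and the simple division are assumptions for the demo, not the kernel's clocks_calc_mult_shift() selection.

/* Simplified model of how a cycle delta from a 32-bit counter such as CCOUNT
 * becomes nanoseconds: ns = (cycles * mult) >> shift, with mult/2^shift
 * approximating 1e9 / freq. Not the kernel's mult/shift algorithm. */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_mult(uint32_t freq_hz, uint32_t shift)
{
        /* mult = floor(1e9 * 2^shift / freq) */
        return (uint32_t)((1000000000ULL << shift) / freq_hz);
}

static uint64_t cycles_to_ns(uint32_t cycles, uint32_t mult, uint32_t shift)
{
        return ((uint64_t)cycles * mult) >> shift;
}

int main(void)
{
        uint32_t freq = 10000000;       /* 10 MHz, the platform fallback frequency */
        uint32_t shift = 24;            /* assumption: fixed shift for the demo */
        uint32_t mult = calc_mult(freq, shift);

        /* 10 MHz means 100 ns per cycle, so 12345 cycles = 1234500 ns */
        printf("mult=%u shift=%u\n", mult, shift);
        printf("12345 cycles = %llu ns\n",
               (unsigned long long)cycles_to_ns(12345, mult, shift));
        return 0;
}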
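
Note on check_tlb_entry() in the arch/xtensa/mm/tlb.c hunk: it enforces two rules. First, an entry tagged with the kernel ASID (1) must map a kernel virtual address (>= TASK_SIZE) and user ASIDs must map user addresses. Second, an entry belonging to the current address space must translate to the same physical page as its PTE; a mismatch is a hard error while the PTE is still present and only suspicious once the PTE has been cleared (as happens during a batched flush). The condensed model below keeps just those two decisions; the structures, the constants, and the dropped refcount/mapcount refinement are simplifications for illustration, not the Xtensa register layout.

/* Condensed model of the two consistency rules in check_tlb_entry().
 * Structures and constants are illustrative, not the hardware layout. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE       0x40000000u     /* assumption: user/kernel VA split */
#define PAGE_MASK       0xfffff000u
#define KERNEL_ASID     1u

#define TLB_OK          0
#define TLB_SUSPICIOUS  1
#define TLB_INSANE      2

struct tlb_entry {
        uint32_t vpn;                   /* virtual page (page-aligned) */
        uint32_t pfn;                   /* physical page it maps to */
        uint32_t asid;                  /* address-space id of the entry */
};

struct pte {
        uint32_t pfn;                   /* physical page from the page table */
        bool present;
};

static int check_entry(const struct tlb_entry *e, const struct pte *pte,
                       uint32_t current_asid)
{
        bool kernel = (e->asid == KERNEL_ASID);

        /* Rule 1: kernel-tagged entries map kernel VAs, user-tagged user VAs. */
        if (e->asid != 0 && ((e->vpn < TASK_SIZE) == kernel))
                return TLB_INSANE;

        /* Rule 2: entries of the live address space must agree with the PTE.
         * A mismatch against a present PTE is a hard error; against a cleared
         * PTE it is only flagged as suspicious. */
        if (e->asid == current_asid &&
            (e->pfn & PAGE_MASK) != (pte->pfn & PAGE_MASK))
                return pte->present ? TLB_INSANE : TLB_SUSPICIOUS;

        return TLB_OK;
}

int main(void)
{
        struct pte live    = { 0x03000000, true };
        struct pte cleared = { 0x03000000, false };
        struct tlb_entry kernel_maps_user = { 0x00001000, 0x01000000, KERNEL_ASID };
        struct tlb_entry stale            = { 0x00002000, 0x02000000, 4 };

        printf("kernel ASID on user VA -> %d\n",
               check_entry(&kernel_maps_user, &live, 4));      /* 2: TLB_INSANE */
        printf("stale vs. present PTE  -> %d\n",
               check_entry(&stale, &live, 4));                 /* 2: TLB_INSANE */
        printf("stale vs. cleared PTE  -> %d\n",
               check_entry(&stale, &cleared, 4));              /* 1: TLB_SUSPICIOUS */
        return 0;
}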