Diffstat (limited to 'arch')
221 files changed, 2058 insertions, 1377 deletions
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 4567aca6fdd6..dfa32f061320 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -12,16 +12,22 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
+	return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
-	get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
+	get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 04eea4894ef3..df24b76f9246 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -108,7 +108,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
 }
 
 static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+				       dma_addr_t *dma_handle, gfp_t gfp,
+				       struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -123,7 +124,8 @@ static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void alpha_noop_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_addr)
+				     void *cpu_addr, dma_addr_t dma_addr,
+				     struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
@@ -174,8 +176,8 @@ static int alpha_noop_set_mask(struct device *dev, u64 mask)
 }
 
 struct dma_map_ops alpha_noop_ops = {
-	.alloc_coherent		= alpha_noop_alloc_coherent,
-	.free_coherent		= alpha_noop_free_coherent,
+	.alloc			= alpha_noop_alloc_coherent,
+	.free			= alpha_noop_free_coherent,
 	.map_page		= alpha_noop_map_page,
 	.map_sg			= alpha_noop_map_sg,
 	.mapping_error		= alpha_noop_mapping_error,
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 43610804987d..cd634795aa9c 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -434,7 +434,8 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
    else DMA_ADDRP is undefined.  */
 
 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_addrp, gfp_t gfp)
+				      dma_addr_t *dma_addrp, gfp_t gfp,
+				      struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	void *cpu_addr;
@@ -478,7 +479,8 @@ try_again:
    DMA_ADDR past this call are illegal.  */
 
 static void alpha_pci_free_coherent(struct device *dev, size_t size,
-				    void *cpu_addr, dma_addr_t dma_addr)
+				    void *cpu_addr, dma_addr_t dma_addr,
+				    struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
@@ -952,8 +954,8 @@ static int alpha_pci_set_mask(struct device *dev, u64 mask)
 }
 
 struct dma_map_ops alpha_pci_ops = {
-	.alloc_coherent		= alpha_pci_alloc_coherent,
-	.free_coherent		= alpha_pci_free_coherent,
+	.alloc			= alpha_pci_alloc_coherent,
+	.free			= alpha_pci_free_coherent,
 	.map_page		= alpha_pci_map_page,
 	.unmap_page		= alpha_pci_unmap_page,
 	.map_sg			= alpha_pci_map_sg,
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 6f7feb5db271..35f2ef44de12 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -120,12 +120,13 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
  */
 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
 {
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
+	sigset_t blocked;
+
 	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -238,10 +239,7 @@ do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
 		goto give_sigsegv;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(sc, regs, sw))
 		goto give_sigsegv;
@@ -276,10 +274,7 @@ do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
 		goto give_sigsegv;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
 		goto give_sigsegv;
@@ -501,14 +496,8 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
 	else
 		ret = setup_frame(sig, ka, oldset, regs, sw);
 
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	if (ret == 0)
+		block_sigmask(ka, sig);
 
 	return ret;
 }
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 93180845ae16..cf006d40342c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -338,6 +338,7 @@ config ARCH_AT91
 	select HAVE_CLK
 	select CLKDEV_LOOKUP
 	select IRQ_DOMAIN
+	select NEED_MACH_IO_H if PCCARD
 	help
 	  This enables support for systems based on the Atmel AT91RM9200,
 	  AT91SAM9 processors.
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi index 92f36627e7f8..799ad1889b51 100644 --- a/arch/arm/boot/dts/at91sam9g20.dtsi +++ b/arch/arm/boot/dts/at91sam9g20.dtsi @@ -35,7 +35,7 @@ }; }; - memory@20000000 { + memory { reg = <0x20000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts index ac0dc0031dda..7829a4d0cb22 100644 --- a/arch/arm/boot/dts/at91sam9g25ek.dts +++ b/arch/arm/boot/dts/at91sam9g25ek.dts @@ -37,8 +37,8 @@ usb0: ohci@00600000 { status = "okay"; num-ports = <2>; - atmel,vbus-gpio = <&pioD 19 0 - &pioD 20 0 + atmel,vbus-gpio = <&pioD 19 1 + &pioD 20 1 >; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 3d0c32fb218f..9e6eb6ecea0e 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -36,7 +36,7 @@ }; }; - memory@70000000 { + memory { reg = <0x70000000 0x10000000>; }; diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index c4c8ae4123d5..a3633bd13111 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts @@ -17,7 +17,7 @@ bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; }; - memory@70000000 { + memory { reg = <0x70000000 0x4000000>; }; @@ -73,8 +73,8 @@ usb0: ohci@00700000 { status = "okay"; num-ports = <2>; - atmel,vbus-gpio = <&pioD 1 0 - &pioD 3 0>; + atmel,vbus-gpio = <&pioD 1 1 + &pioD 3 1>; }; usb1: ehci@00800000 { diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index c111001f254e..70ab3a4e026f 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -34,7 +34,7 @@ }; }; - memory@20000000 { + memory { reg = <0x20000000 0x10000000>; }; @@ -201,8 +201,8 @@ >; atmel,nand-addr-offset = <21>; atmel,nand-cmd-offset = <22>; - gpios = <&pioC 8 0 - &pioC 14 0 + gpios = <&pioD 5 0 + &pioD 4 0 0 >; status = "disabled"; diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi index 67936f83c694..31e7be23703d 100644 --- a/arch/arm/boot/dts/at91sam9x5cm.dtsi +++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi @@ -8,7 +8,7 @@ */ / { - memory@20000000 { + memory { reg = <0x20000000 0x8000000>; }; diff --git a/arch/arm/boot/dts/usb_a9g20.dts b/arch/arm/boot/dts/usb_a9g20.dts index 3b3c4e0fa79f..7c2399c532e5 100644 --- a/arch/arm/boot/dts/usb_a9g20.dts +++ b/arch/arm/boot/dts/usb_a9g20.dts @@ -16,7 +16,7 @@ bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock5 rw rootfstype=ubifs"; }; - memory@20000000 { + memory { reg = <0x20000000 0x4000000>; }; diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 44f4a09ff37b..05112380dc53 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -2,6 +2,7 @@ #define __ASM_BARRIER_H #ifndef __ASSEMBLY__ +#include <asm/outercache.h> #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); @@ -39,7 +40,6 @@ #ifdef CONFIG_ARCH_HAS_BARRIERS #include <mach/barriers.h> #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#include <asm/outercache.h> #define mb() do { dsb(); outer_sync(); } while (0) #define rmb() dsb() #define wmb() mb() diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index df0ac0bb39aa..9af5563dd3eb 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -119,7 +119,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr) #ifdef 
CONFIG_NEED_MACH_IO_H #include <mach/io.h> #else -#define __io(a) ({ (void)(a); __typesafe_io(0); }) +#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT) #endif /* diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 632df9a66f8c..ede5f7741c42 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -299,7 +299,6 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev) */ void pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_sys_data *root = bus->sysdata; struct pci_dev *dev; u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK; diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c index ab312e516546..b760340b7014 100644 --- a/arch/arm/kernel/insn.c +++ b/arch/arm/kernel/insn.c @@ -1,3 +1,4 @@ +#include <linux/bug.h> #include <linux/kernel.h> #include <asm/opcodes.h> diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 45956c9d0ef0..80abafb9bf33 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -256,7 +256,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, { unsigned long tmp; - if (off & 3 || off >= sizeof(struct user)) + if (off & 3) return -EIO; tmp = 0; @@ -268,6 +268,8 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, tmp = tsk->mm->end_code; else if (off < sizeof(struct pt_regs)) tmp = get_user_reg(tsk, off >> 2); + else if (off >= sizeof(struct user)) + return -EIO; return put_user(tmp, ret); } diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 7e5651ee9f85..5652dde4bbe2 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -598,6 +598,9 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) else cs_pin = spi1_standard_cs[devices[i].chip_select]; + if (!gpio_is_valid(cs_pin)) + continue; + if (devices[i].bus_num == 0) enable_spi0 = 1; else diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 096da87dc00d..4db961a93085 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -415,6 +415,9 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) else cs_pin = spi1_standard_cs[devices[i].chip_select]; + if (!gpio_is_valid(cs_pin)) + continue; + if (devices[i].bus_num == 0) enable_spi0 = 1; else diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index 53688c46f956..fe99206de880 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -72,7 +72,8 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable VBus control for UHP ports */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->vbus_pin[i])) - at91_set_gpio_output(data->vbus_pin[i], 0); + at91_set_gpio_output(data->vbus_pin[i], + data->vbus_pin_active_low[i]); } /* Enable overcurrent notification */ @@ -671,6 +672,9 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) else cs_pin = spi1_standard_cs[devices[i].chip_select]; + if (!gpio_is_valid(cs_pin)) + continue; + if (devices[i].bus_num == 0) enable_spi0 = 1; else diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 698479f1e197..6b008aee1dff 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -127,12 +127,13 @@ void __init 
at91_add_device_usbh_ohci(struct at91_usbh_data *data) /* Enable VBus control for UHP ports */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->vbus_pin[i])) - at91_set_gpio_output(data->vbus_pin[i], 0); + at91_set_gpio_output(data->vbus_pin[i], + data->vbus_pin_active_low[i]); } /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } @@ -188,7 +189,8 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) /* Enable VBus control for UHP ports */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->vbus_pin[i])) - at91_set_gpio_output(data->vbus_pin[i], 0); + at91_set_gpio_output(data->vbus_pin[i], + data->vbus_pin_active_low[i]); } usbh_ehci_data = *data; @@ -785,6 +787,9 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) else cs_pin = spi1_standard_cs[devices[i].chip_select]; + if (!gpio_is_valid(cs_pin)) + continue; + if (devices[i].bus_num == 0) enable_spi0 = 1; else diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index eda72e83037d..fe4ae22e8561 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -419,6 +419,9 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) else cs_pin = spi_standard_cs[devices[i].chip_select]; + if (!gpio_is_valid(cs_pin)) + continue; + /* enable chip-select pin */ at91_set_gpio_output(cs_pin, 1); diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index b6831eeb7b76..13c8cae60462 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c @@ -223,6 +223,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), + CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma0_clk), + CLKDEV_CON_DEV_ID("dma_clk", "ffffee00.dma-controller", &dma1_clk), CLKDEV_CON_ID("pioA", &pioAB_clk), CLKDEV_CON_ID("pioB", &pioAB_clk), CLKDEV_CON_ID("pioC", &pioCD_clk), diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 66f0ddf4b2ae..2ffe50f3a9e9 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -74,6 +74,7 @@ static void __init ek_init_early(void) static struct at91_usbh_data __initdata ek_usbh_data = { .ports = 2, .vbus_pin = { AT91_PIN_PA24, AT91_PIN_PA21 }, + .vbus_pin_active_low = {1, 1}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c index e1bea73e6b30..c88e908ddd82 100644 --- a/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/arch/arm/mach-at91/board-sam9m10g45ek.c @@ -71,6 +71,7 @@ static void __init ek_init_early(void) static struct at91_usbh_data __initdata ek_usbh_hs_data = { .ports = 2, .vbus_pin = {AT91_PIN_PD1, AT91_PIN_PD3}, + .vbus_pin_active_low = {1, 1}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h index 544a5d5ce416..49a821192c65 100644 --- a/arch/arm/mach-at91/include/mach/board.h +++ b/arch/arm/mach-at91/include/mach/board.h @@ -86,14 +86,15 @@ extern void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *d extern void __init 
at91_add_device_eth(struct macb_platform_data *data); /* USB Host */ +#define AT91_MAX_USBH_PORTS 3 struct at91_usbh_data { - u8 ports; /* number of ports on root hub */ - int vbus_pin[2]; /* port power-control pin */ - u8 vbus_pin_active_low[2]; + int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */ + int overcurrent_pin[AT91_MAX_USBH_PORTS]; + u8 ports; /* number of ports on root hub */ u8 overcurrent_supported; - int overcurrent_pin[2]; - u8 overcurrent_status[2]; - u8 overcurrent_changed[2]; + u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS]; + u8 overcurrent_status[AT91_MAX_USBH_PORTS]; + u8 overcurrent_changed[AT91_MAX_USBH_PORTS]; }; extern void __init at91_add_device_usbh(struct at91_usbh_data *data); extern void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data); diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h new file mode 100644 index 000000000000..2d9ca0455745 --- /dev/null +++ b/arch/arm/mach-at91/include/mach/io.h @@ -0,0 +1,27 @@ +/* + * arch/arm/mach-at91/include/mach/io.h + * + * Copyright (C) 2003 SAN People + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_ARCH_IO_H +#define __ASM_ARCH_IO_H + +#define IO_SPACE_LIMIT 0xFFFFFFFF +#define __io(a) __typesafe_io(a) + +#endif diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index e6cc50e94a58..8614aab47cc0 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -583,10 +583,11 @@ core_initcall(exynos_core_init); #ifdef CONFIG_CACHE_L2X0 static int __init exynos4_l2x0_cache_init(void) { + int ret; + if (soc_is_exynos5250()) return 0; - int ret; ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK); if (!ret) { l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs); diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c index 3983abee4264..69aaa4503205 100644 --- a/arch/arm/mach-exynos/dma.c +++ b/arch/arm/mach-exynos/dma.c @@ -35,8 +35,6 @@ #include <mach/irqs.h> #include <mach/dma.h> -static u64 dma_dmamask = DMA_BIT_MASK(32); - static u8 exynos4210_pdma0_peri[] = { DMACH_PCM0_RX, DMACH_PCM0_TX, diff --git a/arch/arm/mach-exynos/include/mach/debug-macro.S b/arch/arm/mach-exynos/include/mach/debug-macro.S index 6c857ff0b5d8..e0c86ea475e7 100644 --- a/arch/arm/mach-exynos/include/mach/debug-macro.S +++ b/arch/arm/mach-exynos/include/mach/debug-macro.S @@ -21,10 +21,9 @@ */ .macro addruart, rp, rv, tmp - mov \rp, #0x10000000 - ldr \rp, [\rp, #0x0] - and \rp, \rp, #0xf00000 - teq \rp, #0x500000 @@ EXYNOS5 + mrc p15, 0, \tmp, c0, c0, 0 + and \tmp, \tmp, #0xf0 + teq \tmp, #0xf0 @@ A15 ldreq \rp, =EXYNOS5_PA_UART movne \rp, #EXYNOS4_PA_UART @@ EXYNOS4 ldr \rv, =S3C_VA_UART diff --git a/arch/arm/mach-exynos/include/mach/uncompress.h b/arch/arm/mach-exynos/include/mach/uncompress.h index 493f4f365ddf..2979995d5a6a 100644 --- 
a/arch/arm/mach-exynos/include/mach/uncompress.h +++ b/arch/arm/mach-exynos/include/mach/uncompress.h @@ -20,9 +20,24 @@ volatile u8 *uart_base; #include <plat/uncompress.h> +static unsigned int __raw_readl(unsigned int ptr) +{ + return *((volatile unsigned int *)ptr); +} + static void arch_detect_cpu(void) { - if (machine_is_smdk5250()) + u32 chip_id = __raw_readl(EXYNOS_PA_CHIPID); + + /* + * product_id is bits 31:12 + * bits 23:20 describe the exynosX family + * + */ + chip_id >>= 20; + chip_id &= 0xf; + + if (chip_id == 0x5) uart_base = (volatile u8 *)EXYNOS5_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT); else uart_base = (volatile u8 *)EXYNOS4_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT); diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c index b9a95ed75553..98e04f5a87dd 100644 --- a/arch/arm/mach-imx/clock-imx27.c +++ b/arch/arm/mach-imx/clock-imx27.c @@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK(NULL, "dma", dma_clk) _REGISTER_CLOCK(NULL, "rtic", rtic_clk) _REGISTER_CLOCK(NULL, "brom", brom_clk) + _REGISTER_CLOCK(NULL, "emma", emma_clk) _REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk) _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk) _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk) diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c index 1e279af656ad..e56c1a83eee3 100644 --- a/arch/arm/mach-imx/clock-imx35.c +++ b/arch/arm/mach-imx/clock-imx35.c @@ -483,7 +483,7 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk) _REGISTER_CLOCK(NULL, "max", max_clk) _REGISTER_CLOCK(NULL, "audmux", audmux_clk) - _REGISTER_CLOCK(NULL, "csi", csi_clk) + _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk) _REGISTER_CLOCK(NULL, "iim", iim_clk) _REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk) _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk) diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c index 27bc27e6ea41..c650145d1646 100644 --- a/arch/arm/mach-imx/mach-armadillo5x0.c +++ b/arch/arm/mach-imx/mach-armadillo5x0.c @@ -38,6 +38,8 @@ #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <linux/delay.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/fixed.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -479,6 +481,11 @@ static struct platform_device *devices[] __initdata = { &armadillo5x0_smc911x_device, }; +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vdd33a", "smsc911x"), + REGULATOR_SUPPLY("vddvario", "smsc911x"), +}; + /* * Perform board specific initializations */ @@ -489,6 +496,8 @@ static void __init armadillo5x0_init(void) mxc_iomux_setup_multiple_pins(armadillo5x0_pins, ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0"); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + platform_add_devices(devices, ARRAY_SIZE(devices)); imx_add_gpio_keys(&armadillo5x0_button_data); imx31_add_imx_i2c1(NULL); diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c index fc78e8071cd1..15a26e908260 100644 --- a/arch/arm/mach-imx/mach-kzm_arm11_01.c +++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c @@ -24,6 +24,8 @@ #include <linux/serial_8250.h> #include <linux/smsc911x.h> #include <linux/types.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/fixed.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -166,6 +168,11 @@ static struct platform_device kzm_smsc9118_device = { }, }; +static struct 
regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vdd33a", "smsc911x"), + REGULATOR_SUPPLY("vddvario", "smsc911x"), +}; + static int __init kzm_init_smsc9118(void) { /* @@ -175,6 +182,8 @@ static int __init kzm_init_smsc9118(void) gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2), "smsc9118-int"); gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2)); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_device_register(&kzm_smsc9118_device); } #else diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c index 02401bbd6d53..83714b0cc290 100644 --- a/arch/arm/mach-imx/mach-mx31lilly.c +++ b/arch/arm/mach-imx/mach-mx31lilly.c @@ -34,6 +34,8 @@ #include <linux/mfd/mc13783.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/fixed.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -242,6 +244,11 @@ static struct platform_device *devices[] __initdata = { static int mx31lilly_baseboard; core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444); +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vdd33a", "smsc911x"), + REGULATOR_SUPPLY("vddvario", "smsc911x"), +}; + static void __init mx31lilly_board_init(void) { imx31_soc_init(); @@ -280,6 +287,8 @@ static void __init mx31lilly_board_init(void) imx31_add_spi_imx1(&spi1_pdata); spi_register_board_info(&mc13783_dev, 1); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + platform_add_devices(devices, ARRAY_SIZE(devices)); /* USB */ diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c index ef80751712e7..0abef5f13df5 100644 --- a/arch/arm/mach-imx/mach-mx31lite.c +++ b/arch/arm/mach-imx/mach-mx31lite.c @@ -29,6 +29,8 @@ #include <linux/usb/ulpi.h> #include <linux/mtd/physmap.h> #include <linux/delay.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/fixed.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -226,6 +228,11 @@ void __init mx31lite_map_io(void) static int mx31lite_baseboard; core_param(mx31lite_baseboard, mx31lite_baseboard, int, 0444); +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vdd33a", "smsc911x"), + REGULATOR_SUPPLY("vddvario", "smsc911x"), +}; + static void __init mx31lite_init(void) { int ret; @@ -259,6 +266,8 @@ static void __init mx31lite_init(void) if (usbh2_pdata.otg) imx31_add_mxc_ehci_hs(2, &usbh2_pdata); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* SMSC9117 IRQ pin */ ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq"); if (ret) diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c index e14291d89e4f..6ae51c6b95b7 100644 --- a/arch/arm/mach-imx/mach-mx35_3ds.c +++ b/arch/arm/mach-imx/mach-mx35_3ds.c @@ -97,7 +97,7 @@ static struct i2c_board_info __initdata i2c_devices_3ds[] = { static int lcd_power_gpio = -ENXIO; static int mc9s08dz60_gpiochip_match(struct gpio_chip *chip, - void *data) + const void *data) { return !strcmp(chip->label, data); } diff --git a/arch/arm/mach-imx/mach-mx53_ard.c b/arch/arm/mach-imx/mach-mx53_ard.c index 753f4fc9ec04..05641980dc5e 100644 --- a/arch/arm/mach-imx/mach-mx53_ard.c +++ b/arch/arm/mach-imx/mach-mx53_ard.c @@ -23,6 +23,8 @@ #include <linux/delay.h> #include <linux/gpio.h> #include <linux/smsc911x.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/fixed.h> #include <mach/common.h> 
#include <mach/hardware.h> @@ -214,6 +216,11 @@ static int weim_cs_config(void) return 0; } +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vdd33a", "smsc911x"), + REGULATOR_SUPPLY("vddvario", "smsc911x"), +}; + void __init imx53_ard_common_init(void) { mxc_iomux_v3_setup_multiple_pads(mx53_ard_pads, @@ -232,6 +239,7 @@ static void __init mx53_ard_board_init(void) imx53_ard_common_init(); mx53_ard_io_init(); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); platform_add_devices(devices, ARRAY_SIZE(devices)); imx53_add_sdhci_esdhc_imx(0, &mx53_ard_sd1_data); diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h index 169a84007456..c14011fe832d 100644 --- a/arch/arm/mach-msm/include/mach/uncompress.h +++ b/arch/arm/mach-msm/include/mach/uncompress.h @@ -16,6 +16,7 @@ #ifndef __ASM_ARCH_MSM_UNCOMPRESS_H #define __ASM_ARCH_MSM_UNCOMPRESS_H +#include <asm/barrier.h> #include <asm/processor.h> #include <mach/msm_iomap.h> diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c index 0c56a5aaf588..c56df9e932ae 100644 --- a/arch/arm/mach-msm/smd_debug.c +++ b/arch/arm/mach-msm/smd_debug.c @@ -203,15 +203,9 @@ static ssize_t debug_read(struct file *file, char __user *buf, return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize); } -static int debug_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static const struct file_operations debug_ops = { .read = debug_read, - .open = debug_open, + .open = simple_open, .llseek = default_llseek, }; diff --git a/arch/arm/mach-omap1/include/mach/io.h b/arch/arm/mach-omap1/include/mach/io.h new file mode 100644 index 000000000000..ce4f8005b26f --- /dev/null +++ b/arch/arm/mach-omap1/include/mach/io.h @@ -0,0 +1,45 @@ +/* + * arch/arm/mach-omap1/include/mach/io.h + * + * IO definitions for TI OMAP processors and boards + * + * Copied from arch/arm/mach-sa1100/include/mach/io.h + * Copyright (C) 1997-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Modifications: + * 06-12-1997 RMK Created. 
+ * 07-04-1999 RMK Major cleanup + */ + +#ifndef __ASM_ARM_ARCH_IO_H +#define __ASM_ARM_ARCH_IO_H + +#define IO_SPACE_LIMIT 0xffffffff + +/* + * We don't actually have real ISA nor PCI buses, but there is so many + * drivers out there that might just work if we fake them... + */ +#define __io(a) __typesafe_io(a) + +#endif diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index 41b0a2fe0b04..909a8b91b564 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -26,6 +26,7 @@ #include <linux/i2c/at24.h> #include <linux/i2c/twl.h> +#include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/mmc/host.h> @@ -81,8 +82,23 @@ static struct omap_smsc911x_platform_data sb_t35_smsc911x_cfg = { .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, }; +static struct regulator_consumer_supply cm_t35_smsc911x_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + +static struct regulator_consumer_supply sb_t35_smsc911x_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.1"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.1"), +}; + static void __init cm_t35_init_ethernet(void) { + regulator_register_fixed(0, cm_t35_smsc911x_supplies, + ARRAY_SIZE(cm_t35_smsc911x_supplies)); + regulator_register_fixed(1, sb_t35_smsc911x_supplies, + ARRAY_SIZE(sb_t35_smsc911x_supplies)); + gpmc_smsc911x_init(&cm_t35_smsc911x_cfg); gpmc_smsc911x_init(&sb_t35_smsc911x_cfg); } diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index e558800adfdf..930c0d380435 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -634,8 +634,14 @@ static void __init igep_wlan_bt_init(void) static inline void __init igep_wlan_bt_init(void) { } #endif +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static void __init igep_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index d50a562adfa0..1b6049567ab4 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -22,6 +22,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/spi/spi.h> +#include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/i2c/twl.h> #include <linux/io.h> @@ -410,8 +411,14 @@ static struct mtd_partition ldp_nand_partitions[] = { }; +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static void __init omap_ldp_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); ldp_init_smsc911x(); omap_i2c_init(); diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 4c90f078abe1..49df12735b41 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -114,15 +114,6 @@ static struct omap_smsc911x_platform_data smsc911x_cfg = { static inline void __init omap3evm_init_smsc911x(void) { - struct clk *l3ck; - unsigned int rate; - - l3ck = clk_get(NULL, "l3_ck"); - if (IS_ERR(l3ck)) - rate = 100000000; - else - rate = clk_get_rate(l3ck); - 
/* Configure ethernet controller reset gpio */ if (cpu_is_omap3430()) { if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1) @@ -632,9 +623,15 @@ static void __init omap3_evm_wl12xx_init(void) #endif } +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static void __init omap3_evm_init(void) { omap3_evm_get_revision(); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); if (cpu_is_omap3630()) omap3_mux_init(omap36x_board_mux, OMAP_PACKAGE_CBB); diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c index 4a7d8c8a75da..9b3c141ff51b 100644 --- a/arch/arm/mach-omap2/board-omap3logic.c +++ b/arch/arm/mach-omap2/board-omap3logic.c @@ -23,6 +23,7 @@ #include <linux/io.h> #include <linux/gpio.h> +#include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/i2c/twl.h> @@ -188,8 +189,14 @@ static struct omap_board_mux board_mux[] __initdata = { }; #endif +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static void __init omap3logic_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); omap3torpedo_fix_pbias_voltage(); omap3logic_i2c_init(); diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index 641004380795..4dffc95bddd2 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c @@ -24,6 +24,7 @@ #include <linux/input.h> #include <linux/gpio_keys.h> +#include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/i2c/twl.h> #include <linux/mmc/host.h> @@ -72,15 +73,6 @@ static struct omap_smsc911x_platform_data smsc911x_cfg = { static inline void __init omap3stalker_init_eth(void) { - struct clk *l3ck; - unsigned int rate; - - l3ck = clk_get(NULL, "l3_ck"); - if (IS_ERR(l3ck)) - rate = 100000000; - else - rate = clk_get_rate(l3ck); - omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP); gpmc_smsc911x_init(&smsc911x_cfg); } @@ -419,8 +411,14 @@ static struct omap_board_mux board_mux[] __initdata = { }; #endif +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static void __init omap3_stalker_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); omap_board_config = omap3_stalker_config; omap_board_config_size = ARRAY_SIZE(omap3_stalker_config); diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index 668533e2a379..33aa3910b09e 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c @@ -498,10 +498,18 @@ static struct gpio overo_bt_gpios[] __initdata = { { OVERO_GPIO_BT_NRESET, GPIOF_OUT_INIT_HIGH, "lcd bl enable" }, }; +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), + REGULATOR_SUPPLY("vddvario", "smsc911x.1"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.1"), +}; + static void __init overo_init(void) { int ret; + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); omap_hsmmc_init(mmc); overo_i2c_init(); diff --git 
a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c index 1e8540eabde9..f64f44173061 100644 --- a/arch/arm/mach-omap2/board-zoom-debugboard.c +++ b/arch/arm/mach-omap2/board-zoom-debugboard.c @@ -14,6 +14,9 @@ #include <linux/smsc911x.h> #include <linux/interrupt.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> + #include <plat/gpmc.h> #include <plat/gpmc-smsc911x.h> @@ -117,11 +120,17 @@ static struct platform_device *zoom_devices[] __initdata = { &zoom_debugboard_serial_device, }; +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + int __init zoom_debugboard_init(void) { if (!omap_zoom_debugboard_detect()) return 0; + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); zoom_init_smsc911x(); zoom_init_quaduart(); return platform_add_devices(zoom_devices, ARRAY_SIZE(zoom_devices)); diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index 480fb8f09aed..f4a626f7c79e 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c @@ -747,7 +747,7 @@ static struct clk dpll4_m3_ck = { .parent = &dpll4_ck, .init = &omap2_init_clksel_parent, .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), - .clksel_mask = OMAP3430_CLKSEL_TV_MASK, + .clksel_mask = OMAP3630_CLKSEL_TV_MASK, .clksel = dpll4_clksel, .clkdm_name = "dpll4_clkdm", .recalc = &omap2_clksel_recalc, @@ -832,7 +832,7 @@ static struct clk dpll4_m4_ck = { .parent = &dpll4_ck, .init = &omap2_init_clksel_parent, .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), - .clksel_mask = OMAP3430_CLKSEL_DSS1_MASK, + .clksel_mask = OMAP3630_CLKSEL_DSS1_MASK, .clksel = dpll4_clksel, .clkdm_name = "dpll4_clkdm", .recalc = &omap2_clksel_recalc, @@ -859,7 +859,7 @@ static struct clk dpll4_m5_ck = { .parent = &dpll4_ck, .init = &omap2_init_clksel_parent, .clksel_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL), - .clksel_mask = OMAP3430_CLKSEL_CAM_MASK, + .clksel_mask = OMAP3630_CLKSEL_CAM_MASK, .clksel = dpll4_clksel, .clkdm_name = "dpll4_clkdm", .set_rate = &omap2_clksel_set_rate, @@ -886,7 +886,7 @@ static struct clk dpll4_m6_ck = { .parent = &dpll4_ck, .init = &omap2_init_clksel_parent, .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - .clksel_mask = OMAP3430_DIV_DPLL4_MASK, + .clksel_mask = OMAP3630_DIV_DPLL4_MASK, .clksel = dpll4_clksel, .clkdm_name = "dpll4_clkdm", .recalc = &omap2_clksel_recalc, @@ -1394,6 +1394,7 @@ static struct clk cpefuse_fck = { .name = "cpefuse_fck", .ops = &clkops_omap2_dflt, .parent = &sys_ck, + .clkdm_name = "core_l4_clkdm", .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT, .recalc = &followparent_recalc, @@ -1403,6 +1404,7 @@ static struct clk ts_fck = { .name = "ts_fck", .ops = &clkops_omap2_dflt, .parent = &omap_32k_fck, + .clkdm_name = "core_l4_clkdm", .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), .enable_bit = OMAP3430ES2_EN_TS_SHIFT, .recalc = &followparent_recalc, @@ -1412,6 +1414,7 @@ static struct clk usbtll_fck = { .name = "usbtll_fck", .ops = &clkops_omap2_dflt_wait, .parent = &dpll5_m2_ck, + .clkdm_name = "core_l4_clkdm", .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT, .recalc = &followparent_recalc, @@ -1617,6 +1620,7 @@ static struct clk fshostusb_fck = { .name = "fshostusb_fck", .ops = 
&clkops_omap2_dflt_wait, .parent = &core_48m_fck, + .clkdm_name = "core_l4_clkdm", .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT, .recalc = &followparent_recalc, @@ -2043,6 +2047,7 @@ static struct clk omapctrl_ick = { .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT, .flags = ENABLE_ON_INIT, + .clkdm_name = "core_l4_clkdm", .recalc = &followparent_recalc, }; @@ -2094,6 +2099,7 @@ static struct clk usb_l4_ick = { .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), .clksel_mask = OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK, .clksel = usb_l4_clksel, + .clkdm_name = "core_l4_clkdm", .recalc = &omap2_clksel_recalc, }; @@ -3467,8 +3473,8 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX), CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX), CLK(NULL, "pclk_ck", &pclk_ck, CK_AM35XX), - CLK("davinci_emac", "emac_clk", &emac_ick, CK_AM35XX), - CLK("davinci_emac", "phy_clk", &emac_fck, CK_AM35XX), + CLK("davinci_emac", NULL, &emac_ick, CK_AM35XX), + CLK("davinci_mdio.0", NULL, &emac_fck, CK_AM35XX), CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX), CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX), CLK("musb-am35x", "ick", &hsotgusb_ick_am35xx, CK_AM35XX), diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index c03c1108468e..fa6ea65ad44b 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c @@ -957,8 +957,8 @@ static struct dpll_data dpll_usb_dd = { .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_USB, .idlest_reg = OMAP4430_CM_IDLEST_DPLL_USB, - .mult_mask = OMAP4430_DPLL_MULT_MASK, - .div1_mask = OMAP4430_DPLL_DIV_MASK, + .mult_mask = OMAP4430_DPLL_MULT_USB_MASK, + .div1_mask = OMAP4430_DPLL_DIV_0_7_MASK, .enable_mask = OMAP4430_DPLL_EN_MASK, .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK, .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK, @@ -978,6 +978,7 @@ static struct clk dpll_usb_ck = { .recalc = &omap3_dpll_recalc, .round_rate = &omap2_dpll_round_rate, .set_rate = &omap3_noncore_dpll_set_rate, + .clkdm_name = "l3_init_clkdm", }; static struct clk dpll_usb_clkdcoldo_ck = { diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c index 9299ac291d28..bd7ed13515cc 100644 --- a/arch/arm/mach-omap2/clockdomains44xx_data.c +++ b/arch/arm/mach-omap2/clockdomains44xx_data.c @@ -390,7 +390,7 @@ static struct clockdomain emu_sys_44xx_clkdm = { .prcm_partition = OMAP4430_PRM_PARTITION, .cm_inst = OMAP4430_PRM_EMU_CM_INST, .clkdm_offs = OMAP4430_PRM_EMU_CM_EMU_CDOFFS, - .flags = CLKDM_CAN_HWSUP, + .flags = CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_FORCE_WAKEUP, }; static struct clockdomain l3_dma_44xx_clkdm = { diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c index 5e5880d6d099..b6c77be3e8f7 100644 --- a/arch/arm/mach-omap2/gpmc-smsc911x.c +++ b/arch/arm/mach-omap2/gpmc-smsc911x.c @@ -19,15 +19,11 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/smsc911x.h> -#include <linux/regulator/fixed.h> -#include <linux/regulator/machine.h> #include <plat/board.h> #include <plat/gpmc.h> #include <plat/gpmc-smsc911x.h> -static struct omap_smsc911x_platform_data *gpmc_cfg; - static struct resource gpmc_smsc911x_resources[] = { [0] = { .flags = IORESOURCE_MEM, @@ -41,51 +37,6 @@ static struct smsc911x_platform_config gpmc_smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, 
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, - .flags = SMSC911X_USE_16BIT, -}; - -static struct regulator_consumer_supply gpmc_smsc911x_supply[] = { - REGULATOR_SUPPLY("vddvario", "smsc911x.0"), - REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), -}; - -/* Generic regulator definition to satisfy smsc911x */ -static struct regulator_init_data gpmc_smsc911x_reg_init_data = { - .constraints = { - .min_uV = 3300000, - .max_uV = 3300000, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(gpmc_smsc911x_supply), - .consumer_supplies = gpmc_smsc911x_supply, -}; - -static struct fixed_voltage_config gpmc_smsc911x_fixed_reg_data = { - .supply_name = "gpmc_smsc911x", - .microvolts = 3300000, - .gpio = -EINVAL, - .startup_delay = 0, - .enable_high = 0, - .enabled_at_boot = 1, - .init_data = &gpmc_smsc911x_reg_init_data, -}; - -/* - * Platform device id of 42 is a temporary fix to avoid conflicts - * with other reg-fixed-voltage devices. The real fix should - * involve the driver core providing a way of dynamically - * assigning a unique id on registration for platform devices - * in the same name space. - */ -static struct platform_device gpmc_smsc911x_regulator = { - .name = "reg-fixed-voltage", - .id = 42, - .dev = { - .platform_data = &gpmc_smsc911x_fixed_reg_data, - }, }; /* @@ -93,23 +44,12 @@ static struct platform_device gpmc_smsc911x_regulator = { * assume that pin multiplexing is done in the board-*.c file, * or in the bootloader. */ -void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data) +void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *gpmc_cfg) { struct platform_device *pdev; unsigned long cs_mem_base; int ret; - gpmc_cfg = board_data; - - if (!gpmc_cfg->id) { - ret = platform_device_register(&gpmc_smsc911x_regulator); - if (ret < 0) { - pr_err("Unable to register smsc911x regulators: %d\n", - ret); - return; - } - } - if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) { pr_err("Failed to request GPMC mem region\n"); return; @@ -139,8 +79,7 @@ void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data) gpio_set_value(gpmc_cfg->gpio_reset, 1); } - if (gpmc_cfg->flags) - gpmc_smsc911x_config.flags = gpmc_cfg->flags; + gpmc_smsc911x_config.flags = gpmc_cfg->flags ? 
: SMSC911X_USE_16BIT; pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id, gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources), diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index 100db6217f39..b0268eaffe13 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -506,6 +506,13 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo, if (oh->dev_attr != NULL) { mmc_dev_attr = oh->dev_attr; mmc_data->controller_flags = mmc_dev_attr->flags; + /* + * erratum 2.1.1.128 doesn't apply if board has + * a transceiver is attached + */ + if (hsmmcinfo->transceiver) + mmc_data->controller_flags &= + ~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ; } pdev = platform_device_alloc(name, ctrl_nr - 1); diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h index 4fa72c7cc7cd..1c582a8592b9 100644 --- a/arch/arm/mach-omap2/include/mach/barriers.h +++ b/arch/arm/mach-omap2/include/mach/barriers.h @@ -22,6 +22,8 @@ #ifndef __MACH_BARRIERS_H #define __MACH_BARRIERS_H +#include <asm/outercache.h> + extern void omap_bus_sync(void); #define rmb() dsb() diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index eba6cd3816f5..2c27fdb61e66 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1395,7 +1395,7 @@ static int _read_hardreset(struct omap_hwmod *oh, const char *name) */ static int _ocp_softreset(struct omap_hwmod *oh) { - u32 v; + u32 v, softrst_mask; int c = 0; int ret = 0; @@ -1427,11 +1427,13 @@ static int _ocp_softreset(struct omap_hwmod *oh) oh->class->sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); - else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) + else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { + softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); omap_test_timeout(!(omap_hwmod_read(oh, oh->class->sysc->sysc_offs) - & SYSC_TYPE2_SOFTRESET_MASK), + & softrst_mask), MAX_MODULE_SOFTRESET_WAIT, c); + } if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", @@ -1477,6 +1479,11 @@ static int _reset(struct omap_hwmod *oh) ret = (oh->class->reset) ? oh->class->reset(oh) : _ocp_softreset(oh); + if (oh->class->sysc) { + _update_sysc_cache(oh); + _enable_sysc(oh); + } + return ret; } @@ -1786,20 +1793,9 @@ static int _setup(struct omap_hwmod *oh, void *data) return 0; } - if (!(oh->flags & HWMOD_INIT_NO_RESET)) { + if (!(oh->flags & HWMOD_INIT_NO_RESET)) _reset(oh); - /* - * OCP_SYSCONFIG bits need to be reprogrammed after a softreset. - * The _enable() function should be split to - * avoid the rewrite of the OCP_SYSCONFIG register. 
- */ - if (oh->class->sysc) { - _update_sysc_cache(oh); - _enable_sysc(oh); - } - } - postsetup_state = oh->_postsetup_state; if (postsetup_state == _HWMOD_STATE_UNKNOWN) postsetup_state = _HWMOD_STATE_ENABLED; @@ -1907,20 +1903,10 @@ void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs) */ int omap_hwmod_softreset(struct omap_hwmod *oh) { - u32 v; - int ret; - - if (!oh || !(oh->_sysc_cache)) + if (!oh) return -EINVAL; - v = oh->_sysc_cache; - ret = _set_softreset(oh, &v); - if (ret) - goto error; - _write_sysconfig(v, oh); - -error: - return ret; + return _ocp_softreset(oh); } /** @@ -2463,26 +2449,28 @@ int omap_hwmod_del_initiator_dep(struct omap_hwmod *oh, * @oh: struct omap_hwmod * * * Sets the module OCP socket ENAWAKEUP bit to allow the module to - * send wakeups to the PRCM. Eventually this should sets PRCM wakeup - * registers to cause the PRCM to receive wakeup events from the - * module. Does not set any wakeup routing registers beyond this - * point - if the module is to wake up any other module or subsystem, - * that must be set separately. Called by omap_device code. Returns - * -EINVAL on error or 0 upon success. + * send wakeups to the PRCM, and enable I/O ring wakeup events for + * this IP block if it has dynamic mux entries. Eventually this + * should set PRCM wakeup registers to cause the PRCM to receive + * wakeup events from the module. Does not set any wakeup routing + * registers beyond this point - if the module is to wake up any other + * module or subsystem, that must be set separately. Called by + * omap_device code. Returns -EINVAL on error or 0 upon success. */ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh) { unsigned long flags; u32 v; - if (!oh->class->sysc || - !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) - return -EINVAL; - spin_lock_irqsave(&oh->_lock, flags); - v = oh->_sysc_cache; - _enable_wakeup(oh, &v); - _write_sysconfig(v, oh); + + if (oh->class->sysc && + (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) { + v = oh->_sysc_cache; + _enable_wakeup(oh, &v); + _write_sysconfig(v, oh); + } + _set_idle_ioring_wakeup(oh, true); spin_unlock_irqrestore(&oh->_lock, flags); @@ -2494,26 +2482,28 @@ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh) * @oh: struct omap_hwmod * * * Clears the module OCP socket ENAWAKEUP bit to prevent the module - * from sending wakeups to the PRCM. Eventually this should clear - * PRCM wakeup registers to cause the PRCM to ignore wakeup events - * from the module. Does not set any wakeup routing registers beyond - * this point - if the module is to wake up any other module or - * subsystem, that must be set separately. Called by omap_device - * code. Returns -EINVAL on error or 0 upon success. + * from sending wakeups to the PRCM, and disable I/O ring wakeup + * events for this IP block if it has dynamic mux entries. Eventually + * this should clear PRCM wakeup registers to cause the PRCM to ignore + * wakeup events from the module. Does not set any wakeup routing + * registers beyond this point - if the module is to wake up any other + * module or subsystem, that must be set separately. Called by + * omap_device code. Returns -EINVAL on error or 0 upon success. 
*/ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh) { unsigned long flags; u32 v; - if (!oh->class->sysc || - !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) - return -EINVAL; - spin_lock_irqsave(&oh->_lock, flags); - v = oh->_sysc_cache; - _disable_wakeup(oh, &v); - _write_sysconfig(v, oh); + + if (oh->class->sysc && + (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) { + v = oh->_sysc_cache; + _disable_wakeup(oh, &v); + _write_sysconfig(v, oh); + } + _set_idle_ioring_wakeup(oh, false); spin_unlock_irqrestore(&oh->_lock, flags); diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 08daa5e0eb5f..cc9bd106a854 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2996,6 +2996,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcbsp1_slaves[] = { &omap44xx_l4_abe__mcbsp1_dma, }; +static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = { + { .role = "pad_fck", .clk = "pad_clks_ck" }, + { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" }, +}; + static struct omap_hwmod omap44xx_mcbsp1_hwmod = { .name = "mcbsp1", .class = &omap44xx_mcbsp_hwmod_class, @@ -3012,6 +3017,8 @@ static struct omap_hwmod omap44xx_mcbsp1_hwmod = { }, .slaves = omap44xx_mcbsp1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp1_slaves), + .opt_clks = mcbsp1_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(mcbsp1_opt_clks), }; /* mcbsp2 */ @@ -3071,6 +3078,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcbsp2_slaves[] = { &omap44xx_l4_abe__mcbsp2_dma, }; +static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = { + { .role = "pad_fck", .clk = "pad_clks_ck" }, + { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" }, +}; + static struct omap_hwmod omap44xx_mcbsp2_hwmod = { .name = "mcbsp2", .class = &omap44xx_mcbsp_hwmod_class, @@ -3087,6 +3099,8 @@ static struct omap_hwmod omap44xx_mcbsp2_hwmod = { }, .slaves = omap44xx_mcbsp2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp2_slaves), + .opt_clks = mcbsp2_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(mcbsp2_opt_clks), }; /* mcbsp3 */ @@ -3146,6 +3160,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcbsp3_slaves[] = { &omap44xx_l4_abe__mcbsp3_dma, }; +static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = { + { .role = "pad_fck", .clk = "pad_clks_ck" }, + { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" }, +}; + static struct omap_hwmod omap44xx_mcbsp3_hwmod = { .name = "mcbsp3", .class = &omap44xx_mcbsp_hwmod_class, @@ -3162,6 +3181,8 @@ static struct omap_hwmod omap44xx_mcbsp3_hwmod = { }, .slaves = omap44xx_mcbsp3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp3_slaves), + .opt_clks = mcbsp3_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(mcbsp3_opt_clks), }; /* mcbsp4 */ @@ -3200,6 +3221,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcbsp4_slaves[] = { &omap44xx_l4_per__mcbsp4, }; +static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = { + { .role = "pad_fck", .clk = "pad_clks_ck" }, + { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" }, +}; + static struct omap_hwmod omap44xx_mcbsp4_hwmod = { .name = "mcbsp4", .class = &omap44xx_mcbsp_hwmod_class, @@ -3216,6 +3242,8 @@ static struct omap_hwmod omap44xx_mcbsp4_hwmod = { }, .slaves = omap44xx_mcbsp4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp4_slaves), + .opt_clks = mcbsp4_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(mcbsp4_opt_clks), }; /* diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c index 9262a6b47702..de6d46451746 100644 --- a/arch/arm/mach-omap2/opp.c +++ b/arch/arm/mach-omap2/opp.c @@ -64,10 +64,10 @@ int __init 
omap_init_opp_table(struct omap_opp_def *opp_def, } oh = omap_hwmod_lookup(opp_def->hwmod_name); if (!oh || !oh->od) { - pr_warn("%s: no hwmod or odev for %s, [%d] " + pr_debug("%s: no hwmod or odev for %s, [%d] " "cannot add OPPs.\n", __func__, opp_def->hwmod_name, i); - return -EINVAL; + continue; } dev = &oh->od->pdev->dev; diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 238defc6f6df..703bd1099259 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -153,8 +153,7 @@ static void omap3_save_secure_ram_context(void) pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { - printk(KERN_ERR "save_secure_sram() returns %08x\n", - ret); + pr_err("save_secure_sram() returns %08x\n", ret); while (1) ; } @@ -289,7 +288,7 @@ void omap_sram_idle(void) break; default: /* Invalid state */ - printk(KERN_ERR "Invalid mpu state in sram_idle\n"); + pr_err("Invalid mpu state in sram_idle\n"); return; } @@ -439,18 +438,17 @@ restore: list_for_each_entry(pwrst, &pwrst_list, node) { state = pwrdm_read_prev_pwrst(pwrst->pwrdm); if (state > pwrst->next_state) { - printk(KERN_INFO "Powerdomain (%s) didn't enter " - "target state %d\n", + pr_info("Powerdomain (%s) didn't enter " + "target state %d\n", pwrst->pwrdm->name, pwrst->next_state); ret = -1; } omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); } if (ret) - printk(KERN_ERR "Could not enter target state in pm_suspend\n"); + pr_err("Could not enter target state in pm_suspend\n"); else - printk(KERN_INFO "Successfully put all powerdomains " - "to target state\n"); + pr_info("Successfully put all powerdomains to target state\n"); return ret; } @@ -734,21 +732,22 @@ static int __init omap3_pm_init(void) if (ret) { pr_err("pm: Failed to request pm_io irq\n"); - goto err1; + goto err2; } ret = pwrdm_for_each(pwrdms_setup, NULL); if (ret) { - printk(KERN_ERR "Failed to setup powerdomains\n"); - goto err2; + pr_err("Failed to setup powerdomains\n"); + goto err3; } (void) clkdm_for_each(omap_pm_clkdms_setup, NULL); mpu_pwrdm = pwrdm_lookup("mpu_pwrdm"); if (mpu_pwrdm == NULL) { - printk(KERN_ERR "Failed to get mpu_pwrdm\n"); - goto err2; + pr_err("Failed to get mpu_pwrdm\n"); + ret = -EINVAL; + goto err3; } neon_pwrdm = pwrdm_lookup("neon_pwrdm"); @@ -781,8 +780,8 @@ static int __init omap3_pm_init(void) omap3_secure_ram_storage = kmalloc(0x803F, GFP_KERNEL); if (!omap3_secure_ram_storage) - printk(KERN_ERR "Memory allocation failed when" - "allocating for secure sram context\n"); + pr_err("Memory allocation failed when " + "allocating for secure sram context\n"); local_irq_disable(); local_fiq_disable(); @@ -796,14 +795,17 @@ static int __init omap3_pm_init(void) } omap3_save_scratchpad_contents(); -err1: return ret; -err2: - free_irq(INT_34XX_PRCM_MPU_IRQ, NULL); + +err3: list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) { list_del(&pwrst->node); kfree(pwrst); } + free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init); +err2: + free_irq(omap_prcm_event_to_irq("wkup"), NULL); +err1: return ret; } diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 9ccaadc2cf07..885625352429 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -144,7 +144,7 @@ static void omap_default_idle(void) static int __init omap4_pm_init(void) { int ret; - struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm; + struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup; struct clockdomain 
*ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm; if (!cpu_is_omap44xx()) @@ -168,14 +168,19 @@ static int __init omap4_pm_init(void) * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as * expected. The hardware recommendation is to enable static * dependencies for these to avoid system lock ups or random crashes. + * The L4 wakeup depedency is added to workaround the OCP sync hardware + * BUG with 32K synctimer which lead to incorrect timer value read + * from the 32K counter. The BUG applies for GPTIMER1 and WDT2 which + * are part of L4 wakeup clockdomain. */ mpuss_clkdm = clkdm_lookup("mpuss_clkdm"); emif_clkdm = clkdm_lookup("l3_emif_clkdm"); l3_1_clkdm = clkdm_lookup("l3_1_clkdm"); l3_2_clkdm = clkdm_lookup("l3_2_clkdm"); l4_per_clkdm = clkdm_lookup("l4_per_clkdm"); + l4wkup = clkdm_lookup("l4_wkup_clkdm"); ducati_clkdm = clkdm_lookup("ducati_clkdm"); - if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) || + if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) || (!l4wkup) || (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm)) goto err2; @@ -183,6 +188,7 @@ static int __init omap4_pm_init(void) ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm); ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm); ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm); + ret |= clkdm_add_wkdep(mpuss_clkdm, l4wkup); ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm); ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm); if (ret) { diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 8a18d1bd61c8..96ad3dbeac34 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -972,7 +972,13 @@ int pwrdm_wait_transition(struct powerdomain *pwrdm) int pwrdm_state_switch(struct powerdomain *pwrdm) { - return _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW); + int ret; + + ret = pwrdm_wait_transition(pwrdm); + if (!ret) + ret = _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW); + + return ret; } int pwrdm_clkdm_state_switch(struct clockdomain *clkdm) diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index eac623c7c3d8..f106d21ff581 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -147,8 +147,9 @@ static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs) u32 mask, st; /* XXX read mask from RAM? 
*/ - mask = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqen_offs); - st = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqst_offs); + mask = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, + irqen_offs); + st = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqst_offs); return mask & st; } @@ -180,7 +181,7 @@ void omap44xx_prm_read_pending_irqs(unsigned long *events) */ void omap44xx_prm_ocp_barrier(void) { - omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, + omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } @@ -198,19 +199,19 @@ void omap44xx_prm_ocp_barrier(void) void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = - omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, + omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQSTATUS_MPU_OFFSET); saved_mask[1] = - omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, + omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); - omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST, + omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); - omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST, + omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); /* OCP barrier */ - omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, + omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } @@ -226,9 +227,9 @@ void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) */ void omap44xx_prm_restore_irqen(u32 *saved_mask) { - omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_DEVICE_INST, + omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); - omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_DEVICE_INST, + omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); } diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 873b51d494ea..d28f848897d6 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -290,7 +290,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup) goto err; } - for (i = 0; i <= irq_setup->nr_regs; i++) { + for (i = 0; i < irq_setup->nr_regs; i++) { gc = irq_alloc_generic_chip("PRCM", 1, irq_setup->base_irq + i * 32, prm_base, handle_level_irq); diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index f51348dafafd..dde8a11f47d5 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c @@ -54,7 +54,7 @@ static struct omap_device_pm_latency omap_uhhtll_latency[] = { /* * setup_ehci_io_mux - initialize IO pad mux for USBHOST */ -static void setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) +static void __init setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { case OMAP_EHCI_PORT_MODE_PHY: @@ -197,7 +197,8 @@ static void setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) return; } -static void setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) +static +void __init setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { case OMAP_EHCI_PORT_MODE_PHY: @@ -315,7 +316,7 @@ static void setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) } } -static void setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) +static void __init setup_ohci_io_mux(const enum 
usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: @@ -412,7 +413,8 @@ static void setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) } } -static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) +static +void __init setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 109ccd2a8885..fe2d1f80ef50 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -113,6 +113,7 @@ config MACH_ARMCORE select IWMMXT select PXA25x select MIGHT_HAVE_PCI + select NEED_MACH_IO_H if PCI config MACH_EM_X270 bool "CompuLab EM-x270 platform" diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h new file mode 100644 index 000000000000..cd78b7fe3567 --- /dev/null +++ b/arch/arm/mach-pxa/include/mach/io.h @@ -0,0 +1,17 @@ +/* + * arch/arm/mach-pxa/include/mach/io.h + * + * Copied from asm/arch/sa1100/io.h + */ +#ifndef __ASM_ARM_ARCH_IO_H +#define __ASM_ARM_ARCH_IO_H + +#define IO_SPACE_LIMIT 0xffffffff + +/* + * We don't actually have real ISA nor PCI buses, but there is so many + * drivers out there that might just work if we fake them... + */ +#define __io(a) __typesafe_io(a) + +#endif diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h new file mode 100644 index 000000000000..c2f596e7bc2d --- /dev/null +++ b/arch/arm/mach-s3c24xx/common.h @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Common Header for S3C24XX SoCs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ARCH_ARM_MACH_S3C24XX_COMMON_H +#define __ARCH_ARM_MACH_S3C24XX_COMMON_H __FILE__ + +void s3c2410_restart(char mode, const char *cmd); +void s3c244x_restart(char mode, const char *cmd); + +#endif /* __ARCH_ARM_MACH_S3C24XX_COMMON_H */ diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index 48885b7efd6b..c7f418b0cde9 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -313,6 +313,10 @@ static struct sa1100fb_mach_info collie_lcd_info = { .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act, .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2), + +#ifdef CONFIG_BACKLIGHT_LOCOMO + .lcd_power = locomolcd_power +#endif }; static void __init collie_init(void) diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h index 52acda7061b7..f33679d2d3ee 100644 --- a/arch/arm/mach-sa1100/include/mach/collie.h +++ b/arch/arm/mach-sa1100/include/mach/collie.h @@ -1,7 +1,7 @@ /* * arch/arm/mach-sa1100/include/mach/collie.h * - * This file contains the hardware specific definitions for Assabet + * This file contains the hardware specific definitions for Collie * Only include this file from SA1100-specific files. 
* * ChangeLog: @@ -13,6 +13,7 @@ #ifndef __ASM_ARCH_COLLIE_H #define __ASM_ARCH_COLLIE_H +extern void locomolcd_power(int on); #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1) #define COLLIE_GPIO_CHARGE_ON (COLLIE_SCOOP_GPIO_BASE + 0) diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index a6e23f464528..d2268be8c34c 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -190,7 +190,7 @@ static struct resource pre_mem = { .flags = IORESOURCE_MEM | IORESOURCE_PREFETCH, }; -static int __init pci_versatile_setup_resources(struct list_head *resources) +static int __init pci_versatile_setup_resources(struct pci_sys_data *sys) { int ret = 0; @@ -218,9 +218,9 @@ static int __init pci_versatile_setup_resources(struct list_head *resources) * the mem resource for this bus * the prefetch mem resource for this bus */ - pci_add_resource_offset(resources, &io_mem, sys->io_offset); - pci_add_resource_offset(resources, &non_mem, sys->mem_offset); - pci_add_resource_offset(resources, &pre_mem, sys->mem_offset); + pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset); + pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset); + pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset); goto out; @@ -249,7 +249,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys) if (nr == 0) { sys->mem_offset = 0; - ret = pci_versatile_setup_resources(&sys->resources); + ret = pci_versatile_setup_resources(sys); if (ret < 0) { printk("pci_versatile_setup: resources... oops?\n"); goto out; diff --git a/arch/arm/plat-mxc/3ds_debugboard.c b/arch/arm/plat-mxc/3ds_debugboard.c index d1e31fa1b0c3..5cac2c540f4f 100644 --- a/arch/arm/plat-mxc/3ds_debugboard.c +++ b/arch/arm/plat-mxc/3ds_debugboard.c @@ -80,7 +80,7 @@ static struct smsc911x_platform_config smsc911x_config = { static struct platform_device smsc_lan9217_device = { .name = "smsc911x", - .id = 0, + .id = -1, .dev = { .platform_data = &smsc911x_config, }, diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index ce1e9b96ba1a..ad95c7a5d009 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -17,6 +17,7 @@ config ARCH_OMAP1 select IRQ_DOMAIN select HAVE_IDE select NEED_MACH_MEMORY_H + select NEED_MACH_IO_H if PCCARD help "Systems based on omap7xx, omap15xx or omap16xx" diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index 56b6f8b7053e..8506cbb7fea4 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c @@ -441,6 +441,8 @@ static int __init clk_disable_unused(void) return 0; pr_info("clock: disabling unused clocks to save power\n"); + + spin_lock_irqsave(&clockfw_lock, flags); list_for_each_entry(ck, &clocks, node) { if (ck->ops == &clkops_null) continue; @@ -448,10 +450,9 @@ static int __init clk_disable_unused(void) if (ck->usecount > 0 || !ck->enable_reg) continue; - spin_lock_irqsave(&clockfw_lock, flags); arch_clock->clk_disable_unused(ck); - spin_unlock_irqrestore(&clockfw_lock, flags); } + spin_unlock_irqrestore(&clockfw_lock, flags); return 0; } diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index 9e8e63d52aab..8070145ccb98 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h @@ -47,17 +47,17 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2; * with the original PRCM protocol defined for OMAP2420 */ #define SYSC_TYPE1_MIDLEMODE_SHIFT 12 -#define 
SYSC_TYPE1_MIDLEMODE_MASK (0x3 << SYSC_MIDLEMODE_SHIFT) +#define SYSC_TYPE1_MIDLEMODE_MASK (0x3 << SYSC_TYPE1_MIDLEMODE_SHIFT) #define SYSC_TYPE1_CLOCKACTIVITY_SHIFT 8 -#define SYSC_TYPE1_CLOCKACTIVITY_MASK (0x3 << SYSC_CLOCKACTIVITY_SHIFT) +#define SYSC_TYPE1_CLOCKACTIVITY_MASK (0x3 << SYSC_TYPE1_CLOCKACTIVITY_SHIFT) #define SYSC_TYPE1_SIDLEMODE_SHIFT 3 -#define SYSC_TYPE1_SIDLEMODE_MASK (0x3 << SYSC_SIDLEMODE_SHIFT) +#define SYSC_TYPE1_SIDLEMODE_MASK (0x3 << SYSC_TYPE1_SIDLEMODE_SHIFT) #define SYSC_TYPE1_ENAWAKEUP_SHIFT 2 -#define SYSC_TYPE1_ENAWAKEUP_MASK (1 << SYSC_ENAWAKEUP_SHIFT) +#define SYSC_TYPE1_ENAWAKEUP_MASK (1 << SYSC_TYPE1_ENAWAKEUP_SHIFT) #define SYSC_TYPE1_SOFTRESET_SHIFT 1 -#define SYSC_TYPE1_SOFTRESET_MASK (1 << SYSC_SOFTRESET_SHIFT) +#define SYSC_TYPE1_SOFTRESET_MASK (1 << SYSC_TYPE1_SOFTRESET_SHIFT) #define SYSC_TYPE1_AUTOIDLE_SHIFT 0 -#define SYSC_TYPE1_AUTOIDLE_MASK (1 << SYSC_AUTOIDLE_SHIFT) +#define SYSC_TYPE1_AUTOIDLE_MASK (1 << SYSC_TYPE1_AUTOIDLE_SHIFT) /* * OCP SYSCONFIG bit shifts/masks TYPE2. These are for IPs compliant diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h index 808001c9cf8c..0961275373db 100644 --- a/arch/avr32/include/asm/barrier.h +++ b/arch/avr32/include/asm/barrier.h @@ -8,6 +8,8 @@ #ifndef __ASM_AVR32_BARRIER_H #define __ASM_AVR32_BARRIER_H +#define nop() asm volatile("nop") + #define mb() asm volatile("" : : : "memory") #define rmb() mb() #define wmb() asm volatile("sync 0" : : : "memory") diff --git a/arch/avr32/include/asm/special_insns.h b/arch/avr32/include/asm/special_insns.h deleted file mode 100644 index f922218dfaa5..000000000000 --- a/arch/avr32/include/asm/special_insns.h +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright (C) 2004-2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_AVR32_SPECIAL_INSNS_H -#define __ASM_AVR32_SPECIAL_INSNS_H - -#define nop() asm volatile("nop") - -#endif /* __ASM_AVR32_SPECIAL_INSNS_H */ diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h index 71733866cb4f..70742ec997f8 100644 --- a/arch/avr32/mach-at32ap/include/mach/board.h +++ b/arch/avr32/mach-at32ap/include/mach/board.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/serial.h> #include <linux/platform_data/macb.h> -#include <linux/platform_data/atmel_nand.h> +#include <linux/platform_data/atmel.h> #define GPIO_PIN_NONE (-1) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index c1269a1085e1..373a6902d8fa 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -823,7 +823,7 @@ config CACHELINE_ALIGNED_L1 bool "Locate cacheline_aligned data to L1 Data Memory" default y if !BF54x default n if BF54x - depends on !SMP && !BF531 + depends on !SMP && !BF531 && !CRC32 help If enabled, cacheline_aligned data is linked into L1 data memory. 
(less latency) diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig index 9ccc18a6b4df..90b175323644 100644 --- a/arch/blackfin/configs/BF527-EZKIT_defconfig +++ b/arch/blackfin/configs/BF527-EZKIT_defconfig @@ -147,6 +147,7 @@ CONFIG_USB_OTG_BLACKLIST_HUB=y CONFIG_USB_MON=y CONFIG_USB_MUSB_HDRC=y CONFIG_USB_MUSB_BLACKFIN=y +CONFIG_MUSB_PIO_ONLY=y CONFIG_USB_STORAGE=y CONFIG_USB_GADGET=y CONFIG_RTC_CLASS=y diff --git a/arch/blackfin/include/asm/cmpxchg.h b/arch/blackfin/include/asm/cmpxchg.h index ba2484f4cb2a..c05868cc61c1 100644 --- a/arch/blackfin/include/asm/cmpxchg.h +++ b/arch/blackfin/include/asm/cmpxchg.h @@ -122,7 +122,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#include <asm-generic/cmpxchg.h> +#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #endif /* !CONFIG_SMP */ diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h index 5a25856381ff..12d3571b5232 100644 --- a/arch/blackfin/include/asm/gpio.h +++ b/arch/blackfin/include/asm/gpio.h @@ -244,16 +244,26 @@ static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) return -EINVAL; } -static inline int gpio_get_value(unsigned gpio) +static inline int __gpio_get_value(unsigned gpio) { return bfin_gpio_get_value(gpio); } -static inline void gpio_set_value(unsigned gpio, int value) +static inline void __gpio_set_value(unsigned gpio, int value) { return bfin_gpio_set_value(gpio, value); } +static inline int gpio_get_value(unsigned gpio) +{ + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned gpio, int value) +{ + return __gpio_set_value(gpio, value); +} + static inline int gpio_to_irq(unsigned gpio) { if (likely(gpio < MAX_BLACKFIN_GPIOS)) diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index 304f675826e9..3b5a05099989 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c @@ -85,10 +85,7 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -279,15 +276,8 @@ static int handle_signal(int sig, /* Set up the stack frame */ ret = setup_rt_frame(sig, ka, info, oldset, regs); - if (ret == 0) { - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked, ¤t->blocked, - &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(¤t->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - } + if (ret == 0) + block_sigmask(ka, sig); return ret; } diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index 41098a3803a2..4f8d8bcdc7de 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c @@ -13,6 +13,7 @@ #include <linux/dma-mapping.h> #include <linux/list.h> #include <linux/pci.h> +#include <linux/export.h> #include <linux/highmem.h> #include <linux/scatterlist.h> #include <asm/io.h> diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 448b224ba4ef..233ed3d2d25e 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -71,29 +71,35 
@@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == bad_dma_address); } -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { void *ret; struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!dma_ops); - ret = ops->alloc_coherent(dev, size, dma_handle, flag); + ret = ops->alloc(dev, size, dma_handle, flag, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, ret); return ret; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); } diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index e711ace62fdf..37302218ca4a 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -54,7 +54,8 @@ static struct gen_pool *coherent_pool; /* Allocates from a pool of uncached memory that was reserved at boot time */ void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag) + dma_addr_t *dma_addr, gfp_t flag, + struct dma_attrs *attrs) { void *ret; @@ -81,7 +82,7 @@ void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, } static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr) + dma_addr_t dma_addr, struct dma_attrs *attrs) { gen_pool_free(coherent_pool, (unsigned long) vaddr, size); } @@ -202,8 +203,8 @@ static void hexagon_sync_single_for_device(struct device *dev, } struct dma_map_ops hexagon_dma_ops = { - .alloc_coherent = hexagon_dma_alloc_coherent, - .free_coherent = hexagon_free_coherent, + .alloc = hexagon_dma_alloc_coherent, + .free = hexagon_free_coherent, .map_sg = hexagon_map_sg, .map_page = hexagon_map_page, .sync_single_for_cpu = hexagon_sync_single_for_cpu, diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index f6ea3a3b4a84..bcda5b2d121a 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1129,7 +1129,8 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, * See Documentation/DMA-API-HOWTO.txt */ static void * -sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) +sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flags, struct dma_attrs *attrs) { struct ioc *ioc; void *addr; @@ -1191,8 +1192,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp * * See Documentation/DMA-API-HOWTO.txt */ -static void sba_free_coherent (struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) +static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, struct dma_attrs *attrs) { sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); free_pages((unsigned long) vaddr, get_order(size)); @@ -2212,8 +2213,8 @@ sba_page_override(char 
*str) __setup("sbapagesize=",sba_page_override); struct dma_map_ops sba_dma_ops = { - .alloc_coherent = sba_alloc_coherent, - .free_coherent = sba_free_coherent, + .alloc = sba_alloc_coherent, + .free = sba_free_coherent, .map_page = sba_map_page, .unmap_page = sba_unmap_page, .map_sg = sba_map_sg_attrs, diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 4336d080b241..4f5e8148440d 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -23,23 +23,29 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *daddr, gfp_t gfp) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *daddr, gfp_t gfp, + struct dma_attrs *attrs) { struct dma_map_ops *ops = platform_dma_get_ops(dev); void *caddr; - caddr = ops->alloc_coherent(dev, size, daddr, gfp); + caddr = ops->alloc(dev, size, daddr, gfp, attrs); debug_dma_alloc_coherent(dev, size, *daddr, caddr); return caddr; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *caddr, dma_addr_t daddr) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *caddr, dma_addr_t daddr, + struct dma_attrs *attrs) { struct dma_map_ops *ops = platform_dma_get_ops(dev); debug_dma_free_coherent(dev, size, caddr, daddr); - ops->free_coherent(dev, size, caddr, daddr); + ops->free(dev, size, caddr, daddr, attrs); } #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index d9485d952ed0..939260aeac98 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -15,16 +15,24 @@ int swiotlb __read_mostly; EXPORT_SYMBOL(swiotlb); static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) gfp |= GFP_DMA; return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); } +static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + struct dma_attrs *attrs) +{ + swiotlb_free_coherent(dev, size, vaddr, dma_addr); +} + struct dma_map_ops swiotlb_dma_ops = { - .alloc_coherent = ia64_swiotlb_alloc_coherent, - .free_coherent = swiotlb_free_coherent, + .alloc = ia64_swiotlb_alloc_coherent, + .free = ia64_swiotlb_free_coherent, .map_page = swiotlb_map_page, .unmap_page = swiotlb_unmap_page, .map_sg = swiotlb_map_sg_attrs, diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index a9d310de57da..3290d6e00c31 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -76,7 +76,8 @@ EXPORT_SYMBOL(sn_dma_set_mask); * more information. */ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t flags) + dma_addr_t * dma_handle, gfp_t flags, + struct dma_attrs *attrs) { void *cpuaddr; unsigned long phys_addr; @@ -137,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, * any associated IOMMU mappings. 
*/ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle) + dma_addr_t dma_handle, struct dma_attrs *attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); @@ -466,8 +467,8 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) } static struct dma_map_ops sn_dma_ops = { - .alloc_coherent = sn_dma_alloc_coherent, - .free_coherent = sn_dma_free_coherent, + .alloc = sn_dma_alloc_coherent, + .free = sn_dma_free_coherent, .map_page = sn_dma_map_page, .unmap_page = sn_dma_unmap_page, .map_sg = sn_dma_map_sg, diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 336e6173794f..f4e32de263a7 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/irqflags.h> +#include <asm/cmpxchg.h> /* * Atomic operations that C can't guarantee us. Useful for diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 96fa6ed7e799..d9f62e0f46c0 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -980,6 +980,9 @@ int __init mac_platform_init(void) { u8 *swim_base; + if (!MACH_IS_MAC) + return -ENODEV; + /* * Serial devices */ diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 512adb64f7dd..8a1ce327c963 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -334,6 +334,9 @@ static __init int q40_add_kbd_device(void) { struct platform_device *pdev; + if (!MACH_IS_Q40) + return -ENODEV; + pdev = platform_device_register_simple("q40kbd", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h index 0094859abd9b..538afc0ab9f3 100644 --- a/arch/microblaze/include/asm/cmpxchg.h +++ b/arch/microblaze/include/asm/cmpxchg.h @@ -1,6 +1,8 @@ #ifndef _ASM_MICROBLAZE_CMPXCHG_H #define _ASM_MICROBLAZE_CMPXCHG_H +#include <linux/irqflags.h> + void __bad_xchg(volatile void *ptr, int size); static inline unsigned long __xchg(unsigned long x, volatile void *ptr, diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 3a3e5b886854..01d228286cb0 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -123,28 +123,34 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); void *memory; BUG_ON(!ops); - memory = ops->alloc_coherent(dev, size, dma_handle, flag); + memory = ops->alloc(dev, size, dma_handle, flag, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, memory); return memory; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *ops = 
get_dma_ops(dev); BUG_ON(!ops); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); } static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index b0526d2716fa..ff8cde159d9a 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -24,7 +24,7 @@ .word 1b,4b,2b,4b; \ .previous;" \ : "=&r" (oldval), "=&r" (ret) \ - : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ + : "r" (uaddr), "i" (-EFAULT), "r" (oparg) \ ); \ }) diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 510a8e1c16ba..bffb54527299 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h @@ -31,6 +31,8 @@ extern const struct seq_operations cpuinfo_op; /* Do necessary setup to start up a newly executed thread. */ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp); +extern void ret_from_fork(void); + # endif /* __ASSEMBLY__ */ # ifndef CONFIG_MMU @@ -143,8 +145,6 @@ static inline void exit_thread(void) unsigned long get_wchan(struct task_struct *p); -extern void ret_from_fork(void); - /* The size allocated for kernel stacks. This _must_ be a power of two! */ # define KERNEL_STACK_SIZE 0x2000 diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 65a4af4cbbbe..a2bfa2ca5730 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -33,7 +33,8 @@ static unsigned long get_dma_direct_offset(struct device *dev) #define NOT_COHERENT_CACHE static void *dma_direct_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { #ifdef NOT_COHERENT_CACHE return consistent_alloc(flag, size, dma_handle); @@ -57,7 +58,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, } static void dma_direct_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { #ifdef NOT_COHERENT_CACHE consistent_free(size, vaddr); @@ -176,8 +178,8 @@ dma_direct_sync_sg_for_device(struct device *dev, } struct dma_map_ops dma_direct_ops = { - .alloc_coherent = dma_direct_alloc_coherent, - .free_coherent = dma_direct_free_coherent, + .alloc = dma_direct_alloc_coherent, + .free = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, .unmap_sg = dma_direct_unmap_sg, .dma_supported = dma_direct_dma_supported, diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c index ec485876d0d0..aba1f9a97d5d 100644 --- a/arch/microblaze/kernel/early_printk.c +++ b/arch/microblaze/kernel/early_printk.c @@ -176,6 +176,7 @@ void __init remap_early_printk(void) base_addr = (u32) ioremap(base_addr, PAGE_SIZE); printk(KERN_CONT "0x%x\n", base_addr); +#ifdef CONFIG_MMU /* * Early console is on the top of skipped TLB entries * decrease tlb_skip size ensure that hardcoded TLB entry will be @@ -189,6 +190,7 @@ void __init remap_early_printk(void) * cmp rX, orig_base_addr */ tlb_skip -= 1; +#endif } void __init disable_early_printk(void) diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 71af974aa24a..16d8dfd9094b 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -206,6 +206,7 @@ 
static int microblaze_debugfs_init(void) } arch_initcall(microblaze_debugfs_init); +# ifdef CONFIG_MMU static int __init debugfs_tlb(void) { struct dentry *d; @@ -218,6 +219,7 @@ static int __init debugfs_tlb(void) return -ENOMEM; } device_initcall(debugfs_tlb); +# endif #endif static int dflt_bus_notify(struct notifier_block *nb, diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index 9781a528cfc9..6be4ae3c3351 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -24,6 +24,7 @@ #include <asm/sections.h> #include <asm/exceptions.h> #include <asm/unwind.h> +#include <asm/switch_to.h> struct stack_trace; diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S index f037266cdaf3..f085995ee848 100644 --- a/arch/microblaze/lib/uaccess_old.S +++ b/arch/microblaze/lib/uaccess_old.S @@ -122,22 +122,22 @@ __strnlen_user: 15: swi r24, r5, 0x0018 + offset; \ 16: swi r25, r5, 0x001C + offset; \ .section __ex_table,"a"; \ - .word 1b, 0f; \ - .word 2b, 0f; \ - .word 3b, 0f; \ - .word 4b, 0f; \ - .word 5b, 0f; \ - .word 6b, 0f; \ - .word 7b, 0f; \ - .word 8b, 0f; \ - .word 9b, 0f; \ - .word 10b, 0f; \ - .word 11b, 0f; \ - .word 12b, 0f; \ - .word 13b, 0f; \ - .word 14b, 0f; \ - .word 15b, 0f; \ - .word 16b, 0f; \ + .word 1b, 33f; \ + .word 2b, 33f; \ + .word 3b, 33f; \ + .word 4b, 33f; \ + .word 5b, 33f; \ + .word 6b, 33f; \ + .word 7b, 33f; \ + .word 8b, 33f; \ + .word 9b, 33f; \ + .word 10b, 33f; \ + .word 11b, 33f; \ + .word 12b, 33f; \ + .word 13b, 33f; \ + .word 14b, 33f; \ + .word 15b, 33f; \ + .word 16b, 33f; \ .text #define COPY_80(offset) \ @@ -190,14 +190,17 @@ w2: sw r4, r5, r3 .align 4 /* Alignment is important to keep icache happy */ page: /* Create room on stack and save registers for storign values */ - addik r1, r1, -32 - swi r19, r1, 4 - swi r20, r1, 8 - swi r21, r1, 12 - swi r22, r1, 16 - swi r23, r1, 20 - swi r24, r1, 24 - swi r25, r1, 28 + addik r1, r1, -40 + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r19, r1, 12 + swi r20, r1, 16 + swi r21, r1, 20 + swi r22, r1, 24 + swi r23, r1, 28 + swi r24, r1, 32 + swi r25, r1, 36 loop: /* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */ /* Loop unrolling to get performance boost */ COPY_80(0x000); @@ -205,21 +208,44 @@ loop: /* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */ COPY_80(0x100); COPY_80(0x180); /* copy loop */ - addik r6, r6, 0x200 - addik r7, r7, -0x200 - bneid r7, loop - addik r5, r5, 0x200 + addik r6, r6, 0x200 + addik r7, r7, -0x200 + bneid r7, loop + addik r5, r5, 0x200 + /* Restore register content */ - lwi r19, r1, 4 - lwi r20, r1, 8 - lwi r21, r1, 12 - lwi r22, r1, 16 - lwi r23, r1, 20 - lwi r24, r1, 24 - lwi r25, r1, 28 - addik r1, r1, 32 + lwi r5, r1, 0 + lwi r6, r1, 4 + lwi r7, r1, 8 + lwi r19, r1, 12 + lwi r20, r1, 16 + lwi r21, r1, 20 + lwi r22, r1, 24 + lwi r23, r1, 28 + lwi r24, r1, 32 + lwi r25, r1, 36 + addik r1, r1, 40 /* return back */ + addik r3, r0, 0 + rtsd r15, 8 + nop + +/* Fault case - return temp count */ +33: addik r3, r7, 0 + /* Restore register content */ + lwi r5, r1, 0 + lwi r6, r1, 4 + lwi r7, r1, 8 + lwi r19, r1, 12 + lwi r20, r1, 16 + lwi r21, r1, 20 + lwi r22, r1, 24 + lwi r23, r1, 28 + lwi r24, r1, 32 + lwi r25, r1, 36 + addik r1, r1, 40 + /* return back */ rtsd r15, 8 nop diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index b6bb92c16a47..41dd00884975 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ 
b/arch/mips/cavium-octeon/dma-octeon.c @@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev, } static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { void *ret; @@ -192,7 +192,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, } static void octeon_dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { int order = get_order(size); @@ -240,8 +240,8 @@ EXPORT_SYMBOL(dma_to_phys); static struct octeon_dma_map_ops octeon_linear_dma_map_ops = { .dma_map_ops = { - .alloc_coherent = octeon_dma_alloc_coherent, - .free_coherent = octeon_dma_free_coherent, + .alloc = octeon_dma_alloc_coherent, + .free = octeon_dma_free_coherent, .map_page = octeon_dma_map_page, .unmap_page = swiotlb_unmap_page, .map_sg = octeon_dma_map_sg, @@ -325,8 +325,8 @@ void __init plat_swiotlb_setup(void) #ifdef CONFIG_PCI static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { .dma_map_ops = { - .alloc_coherent = octeon_dma_alloc_coherent, - .free_coherent = octeon_dma_free_coherent, + .alloc = octeon_dma_alloc_coherent, + .free = octeon_dma_free_coherent, .map_page = octeon_dma_map_page, .unmap_page = swiotlb_unmap_page, .map_sg = octeon_dma_map_sg, diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 7aa37ddfca4b..be39a12901c6 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask) extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { void *ret; struct dma_map_ops *ops = get_dma_ops(dev); - ret = ops->alloc_coherent(dev, size, dma_handle, gfp); + ret = ops->alloc(dev, size, dma_handle, gfp, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, ret); return ret; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); - ops->free_coherent(dev, size, vaddr, dma_handle); + ops->free(dev, size, vaddr, dma_handle, attrs); debug_dma_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 46084912e588..3fab2046c8a4 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, EXPORT_SYMBOL(dma_alloc_noncoherent); static void *mips_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) + dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) { void *ret; @@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, EXPORT_SYMBOL(dma_free_noncoherent); static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t 
dma_handle) + dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long) vaddr; int order = get_order(size); @@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, EXPORT_SYMBOL(dma_cache_sync); static struct dma_map_ops mips_default_dma_map_ops = { - .alloc_coherent = mips_dma_alloc_coherent, - .free_coherent = mips_dma_free_coherent, + .alloc = mips_dma_alloc_coherent, + .free = mips_dma_free_coherent, .map_page = mips_dma_map_page, .unmap_page = mips_dma_unmap_page, .map_sg = mips_dma_map_sg, diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 3ae56073cc3d..6c6defc24619 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -6,6 +6,7 @@ #define _ASM_PARISC_ATOMIC_H_ #include <linux/types.h> +#include <asm/cmpxchg.h> /* * Atomic operations that C can't guarantee us. Useful for @@ -48,112 +49,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) #endif -/* This should get optimized out since it's never called. -** Or get a link error if xchg is used "wrong". -*/ -extern void __xchg_called_with_bad_pointer(void); - - -/* __xchg32/64 defined in arch/parisc/lib/bitops.c */ -extern unsigned long __xchg8(char, char *); -extern unsigned long __xchg32(int, int *); -#ifdef CONFIG_64BIT -extern unsigned long __xchg64(unsigned long, unsigned long *); -#endif - -/* optimizer better get rid of switch since size is a constant */ -static __inline__ unsigned long -__xchg(unsigned long x, __volatile__ void * ptr, int size) -{ - switch(size) { -#ifdef CONFIG_64BIT - case 8: return __xchg64(x,(unsigned long *) ptr); -#endif - case 4: return __xchg32((int) x, (int *) ptr); - case 1: return __xchg8((char) x, (char *) ptr); - } - __xchg_called_with_bad_pointer(); - return x; -} - - -/* -** REVISIT - Abandoned use of LDCW in xchg() for now: -** o need to test sizeof(*ptr) to avoid clearing adjacent bytes -** o and while we are at it, could CONFIG_64BIT code use LDCD too? 
-** -** if (__builtin_constant_p(x) && (x == NULL)) -** if (((unsigned long)p & 0xf) == 0) -** return __ldcw(p); -*/ -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - - -#define __HAVE_ARCH_CMPXCHG 1 - -/* bug catcher for when unsupported size is used - won't link */ -extern void __cmpxchg_called_with_bad_pointer(void); - -/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */ -extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); -extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_); - -/* don't worry...optimizer will get rid of most of this */ -static __inline__ unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) -{ - switch(size) { -#ifdef CONFIG_64BIT - case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_); -#endif - case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr,o,n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - -#include <asm-generic/cmpxchg-local.h> - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, - unsigned long new_, int size) -{ - switch (size) { -#ifdef CONFIG_64BIT - case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_); -#endif - case 4: return __cmpxchg_u32(ptr, old, new_); - default: - return __cmpxchg_local_generic(ptr, old, new_, size); - } -} - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#ifdef CONFIG_64BIT -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) -#else -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif - /* * Note that we need not lock read accesses - aligned word writes/reads * are atomic, so a reader never sees inconsistent values. diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h new file mode 100644 index 000000000000..dbd13354ec41 --- /dev/null +++ b/arch/parisc/include/asm/cmpxchg.h @@ -0,0 +1,116 @@ +/* + * forked from parisc asm/atomic.h which was: + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org> + */ + +#ifndef _ASM_PARISC_CMPXCHG_H_ +#define _ASM_PARISC_CMPXCHG_H_ + +/* This should get optimized out since it's never called. +** Or get a link error if xchg is used "wrong". 
+*/ +extern void __xchg_called_with_bad_pointer(void); + +/* __xchg32/64 defined in arch/parisc/lib/bitops.c */ +extern unsigned long __xchg8(char, char *); +extern unsigned long __xchg32(int, int *); +#ifdef CONFIG_64BIT +extern unsigned long __xchg64(unsigned long, unsigned long *); +#endif + +/* optimizer better get rid of switch since size is a constant */ +static inline unsigned long +__xchg(unsigned long x, __volatile__ void *ptr, int size) +{ + switch (size) { +#ifdef CONFIG_64BIT + case 8: return __xchg64(x, (unsigned long *) ptr); +#endif + case 4: return __xchg32((int) x, (int *) ptr); + case 1: return __xchg8((char) x, (char *) ptr); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* +** REVISIT - Abandoned use of LDCW in xchg() for now: +** o need to test sizeof(*ptr) to avoid clearing adjacent bytes +** o and while we are at it, could CONFIG_64BIT code use LDCD too? +** +** if (__builtin_constant_p(x) && (x == NULL)) +** if (((unsigned long)p & 0xf) == 0) +** return __ldcw(p); +*/ +#define xchg(ptr, x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +#define __HAVE_ARCH_CMPXCHG 1 + +/* bug catcher for when unsupported size is used - won't link */ +extern void __cmpxchg_called_with_bad_pointer(void); + +/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */ +extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, + unsigned int new_); +extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, + unsigned long old, unsigned long new_); + +/* don't worry...optimizer will get rid of most of this */ +static inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) +{ + switch (size) { +#ifdef CONFIG_64BIT + case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_); +#endif + case 4: return __cmpxchg_u32((unsigned int *)ptr, + (unsigned int)old, (unsigned int)new_); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ +}) + +#include <asm-generic/cmpxchg-local.h> + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new_, int size) +{ + switch (size) { +#ifdef CONFIG_64BIT + case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_); +#endif + case 4: return __cmpxchg_u32(ptr, old, new_); + default: + return __cmpxchg_local_generic(ptr, old, new_, size); + } +} + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. 
+ */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) +#ifdef CONFIG_64BIT +#define cmpxchg64_local(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ +}) +#else +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + +#endif /* _ASM_PARISC_CMPXCHG_H_ */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index dd70fac57ec8..62678e365ca0 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -22,9 +22,11 @@ /* Some dma direct funcs must be visible for use in other dma_ops */ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs); extern void dma_direct_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs); #ifdef CONFIG_NOT_COHERENT_CACHE @@ -130,23 +132,29 @@ static inline int dma_supported(struct device *dev, u64 mask) extern int dma_set_mask(struct device *dev, u64 dma_mask); -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { struct dma_map_ops *dma_ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!dma_ops); - cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag); + cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); return cpu_addr; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *dma_ops = get_dma_ops(dev); @@ -154,7 +162,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); } static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 3f6464b4d970..bcfdcd22c766 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -17,7 +17,8 @@ * to the dma address (mapping) of the first page. 
*/ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, dma_handle, dev->coherent_dma_mask, flag, @@ -25,7 +26,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, } static void dma_iommu_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); } @@ -105,8 +107,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev) } struct dma_map_ops dma_iommu_ops = { - .alloc_coherent = dma_iommu_alloc_coherent, - .free_coherent = dma_iommu_free_coherent, + .alloc = dma_iommu_alloc_coherent, + .free = dma_iommu_free_coherent, .map_sg = dma_iommu_map_sg, .unmap_sg = dma_iommu_unmap_sg, .dma_supported = dma_iommu_dma_supported, diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 1ebc9189aada..4ab88dafb235 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -47,8 +47,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev) * for everything else. */ struct dma_map_ops swiotlb_dma_ops = { - .alloc_coherent = dma_direct_alloc_coherent, - .free_coherent = dma_direct_free_coherent, + .alloc = dma_direct_alloc_coherent, + .free = dma_direct_free_coherent, .map_sg = swiotlb_map_sg_attrs, .unmap_sg = swiotlb_unmap_sg_attrs, .dma_supported = swiotlb_dma_supported, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 7d0233c12ee3..b1ec983dcec8 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -26,7 +26,8 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { void *ret; #ifdef CONFIG_NOT_COHERENT_CACHE @@ -54,7 +55,8 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, } void dma_direct_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { #ifdef CONFIG_NOT_COHERENT_CACHE __dma_free_coherent(size, vaddr); @@ -150,8 +152,8 @@ static inline void dma_direct_sync_single(struct device *dev, #endif struct dma_map_ops dma_direct_ops = { - .alloc_coherent = dma_direct_alloc_coherent, - .free_coherent = dma_direct_free_coherent, + .alloc = dma_direct_alloc_coherent, + .free = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, .unmap_sg = dma_direct_unmap_sg, .dma_supported = dma_direct_dma_supported, diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 79bb282e6501..b01d14eeca8d 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -65,7 +65,8 @@ static struct of_device_id __initdata ibmebus_matches[] = { static void *ibmebus_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flag) + gfp_t flag, + struct dma_attrs *attrs) { void *mem; @@ -77,7 +78,8 @@ static void *ibmebus_alloc_coherent(struct device *dev, static void ibmebus_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) + dma_addr_t dma_handle, + struct dma_attrs *attrs) { kfree(vaddr); } @@ -136,8 +138,8 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev) } static struct dma_map_ops ibmebus_dma_ops = { 
- .alloc_coherent = ibmebus_alloc_coherent, - .free_coherent = ibmebus_free_coherent, + .alloc = ibmebus_alloc_coherent, + .free = ibmebus_free_coherent, .map_sg = ibmebus_map_sg, .unmap_sg = ibmebus_unmap_sg, .dma_supported = ibmebus_dma_supported, diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index b2f7c8480bf6..a3a99901c8ec 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -482,7 +482,8 @@ static void vio_cmo_balance(struct work_struct *work) } static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { struct vio_dev *viodev = to_vio_dev(dev); void *ret; @@ -492,7 +493,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, return NULL; } - ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag); + ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs); if (unlikely(ret == NULL)) { vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); atomic_inc(&viodev->cmo.allocs_failed); @@ -502,11 +503,12 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, } static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct vio_dev *viodev = to_vio_dev(dev); - dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle); + dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs); vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); } @@ -607,8 +609,8 @@ static u64 vio_dma_get_required_mask(struct device *dev) } struct dma_map_ops vio_dma_mapping_ops = { - .alloc_coherent = vio_dma_iommu_alloc_coherent, - .free_coherent = vio_dma_iommu_free_coherent, + .alloc = vio_dma_iommu_alloc_coherent, + .free = vio_dma_iommu_free_coherent, .map_sg = vio_dma_iommu_map_sg, .unmap_sg = vio_dma_iommu_unmap_sg, .map_page = vio_dma_iommu_map_page, diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index bed1279aa6a8..e1b60f56f2a1 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -173,9 +173,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type) static struct kvmppc_linear_info *kvm_alloc_linear(int type) { - struct kvmppc_linear_info *ri; + struct kvmppc_linear_info *ri, *ret; - ri = NULL; + ret = NULL; spin_lock(&linear_lock); list_for_each_entry(ri, &free_linears, list) { if (ri->type != type) @@ -183,11 +183,12 @@ static struct kvmppc_linear_info *kvm_alloc_linear(int type) list_del(&ri->list); atomic_inc(&ri->use_count); + memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT); + ret = ri; break; } spin_unlock(&linear_lock); - memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT); - return ri; + return ret; } static void kvm_release_linear(struct kvmppc_linear_info *ri) diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 3f7b674dd4bf..d3fb4df02c41 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -46,8 +46,10 @@ _GLOBAL(__kvmppc_vcore_entry) /* Save host state to the stack */ stdu r1, -SWITCH_FRAME_SIZE(r1) - /* Save non-volatile registers (r14 - r31) */ + /* Save non-volatile registers (r14 - r31) and CR */ SAVE_NVGPRS(r1) + mfcr r3 + std r3, _CCR(r1) /* Save host DSCR */ BEGIN_FTR_SECTION @@ -157,8 +159,10 @@ kvmppc_handler_highmem: * R13 = PACA */ - /* Restore non-volatile host 
registers (r14 - r31) */ + /* Restore non-volatile host registers (r14 - r31) and CR */ REST_NVGPRS(r1) + ld r4, _CCR(r1) + mtcr r4 addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 0a8515a5c042..3e35383bdb21 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -84,6 +84,10 @@ kvm_start_entry: /* Save non-volatile registers (r14 - r31) */ SAVE_NVGPRS(r1) + /* Save CR */ + mfcr r14 + stw r14, _CCR(r1) + /* Save LR */ PPC_STL r0, _LINK(r1) @@ -165,6 +169,9 @@ kvm_exit_loop: PPC_LL r4, _LINK(r1) mtlr r4 + lwz r14, _CCR(r1) + mtcr r14 + /* Restore non-volatile host registers (r14 - r31) */ REST_NVGPRS(r1) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 642d88574b07..7759053d391b 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -777,6 +777,7 @@ program_interrupt: } } + preempt_disable(); if (!(r & RESUME_HOST)) { /* To avoid clobbering exit_reason, only check for signals if * we aren't already exiting to userspace for some other @@ -798,8 +799,6 @@ program_interrupt: run->exit_reason = KVM_EXIT_INTR; r = -EINTR; } else { - preempt_disable(); - /* In case an interrupt came in that was triggered * from userspace (like DEC), we need to check what * to inject now! */ @@ -881,7 +880,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) switch (reg->id) { case KVM_REG_PPC_HIOR: - r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr); + r = copy_to_user((u64 __user *)(long)reg->addr, + &to_book3s(vcpu)->hior, sizeof(u64)); break; default: break; @@ -896,7 +896,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) switch (reg->id) { case KVM_REG_PPC_HIOR: - r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr); + r = copy_from_user(&to_book3s(vcpu)->hior, + (u64 __user *)(long)reg->addr, sizeof(u64)); if (!r) to_book3s(vcpu)->hior_explicit = true; break; diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 10d8ef602e5c..c8c4b878795a 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -34,7 +34,8 @@ /* r2 is special: it holds 'current', and it made nonvolatile in the * kernel with the -ffixed-r2 gcc option. */ #define HOST_R2 12 -#define HOST_NV_GPRS 16 +#define HOST_CR 16 +#define HOST_NV_GPRS 20 #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4) #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ @@ -296,8 +297,10 @@ heavyweight_exit: /* Return to kvm_vcpu_run(). */ lwz r4, HOST_STACK_LR(r1) + lwz r5, HOST_CR(r1) addi r1, r1, HOST_STACK_SIZE mtlr r4 + mtcr r5 /* r3 still contains the return code from kvmppc_handle_exit(). */ blr @@ -314,6 +317,8 @@ _GLOBAL(__kvmppc_vcpu_run) stw r3, HOST_RUN(r1) mflr r3 stw r3, HOST_STACK_LR(r1) + mfcr r5 + stw r5, HOST_CR(r1) /* Save host non-volatile register state to stack. 
*/ stw r14, HOST_NV_GPR(r14)(r1) diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index ae9fc7bc17d6..b9f509a34c01 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -564,7 +564,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) /* A coherent allocation implies strong ordering */ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { if (iommu_fixed_is_weak) return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), @@ -572,18 +573,19 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, device_to_mask(dev), flag, dev_to_node(dev)); else - return dma_direct_ops.alloc_coherent(dev, size, dma_handle, - flag); + return dma_direct_ops.alloc(dev, size, dma_handle, flag, + attrs); } static void dma_fixed_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { if (iommu_fixed_is_weak) iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, dma_handle); else - dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle); + dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs); } static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, @@ -642,8 +644,8 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask) static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); struct dma_map_ops dma_iommu_fixed_ops = { - .alloc_coherent = dma_fixed_alloc_coherent, - .free_coherent = dma_fixed_free_coherent, + .alloc = dma_fixed_alloc_coherent, + .free = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, .unmap_sg = dma_fixed_unmap_sg, .dma_supported = dma_fixed_dma_supported, diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 880eb9ce22c5..5606fe36faf2 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -515,7 +515,8 @@ core_initcall(ps3_system_bus_init); * to the dma address (mapping) of the first page. 
*/ static void * ps3_alloc_coherent(struct device *_dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { int result; struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); @@ -552,7 +553,7 @@ clean_none: } static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, - dma_addr_t dma_handle) + dma_addr_t dma_handle, struct dma_attrs *attrs) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); @@ -701,8 +702,8 @@ static u64 ps3_dma_get_required_mask(struct device *_dev) } static struct dma_map_ops ps3_sb_dma_ops = { - .alloc_coherent = ps3_alloc_coherent, - .free_coherent = ps3_free_coherent, + .alloc = ps3_alloc_coherent, + .free = ps3_free_coherent, .map_sg = ps3_sb_map_sg, .unmap_sg = ps3_sb_unmap_sg, .dma_supported = ps3_dma_supported, @@ -712,8 +713,8 @@ static struct dma_map_ops ps3_sb_dma_ops = { }; static struct dma_map_ops ps3_ioc0_dma_ops = { - .alloc_coherent = ps3_alloc_coherent, - .free_coherent = ps3_free_coherent, + .alloc = ps3_alloc_coherent, + .free = ps3_free_coherent, .map_sg = ps3_ioc0_map_sg, .unmap_sg = ps3_ioc0_unmap_sg, .dma_supported = ps3_dma_supported, diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index c1d5a820b1aa..5f2bb4242c0f 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -61,6 +61,7 @@ config DUMP_CODE config DWARF_UNWINDER bool "Enable the DWARF unwinder for stacktraces" select FRAME_POINTER + depends on SUPERH32 default n help Enabling this option will make stacktraces more accurate, at diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index d879848f3cdd..d0d6221d7c2e 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c @@ -28,6 +28,7 @@ #include <cpu/sh7785.h> #include <asm/heartbeat.h> #include <asm/clock.h> +#include <asm/bl_bit.h> /* * NOTE: This board has 2 physical memory maps. 
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c index adc9b4bba828..8b50cf763c06 100644 --- a/arch/sh/boards/mach-hp6xx/pm.c +++ b/arch/sh/boards/mach-hp6xx/pm.c @@ -14,6 +14,7 @@ #include <linux/gfp.h> #include <asm/io.h> #include <asm/hd64461.h> +#include <asm/bl_bit.h> #include <mach/hp6xx.h> #include <cpu/dac.h> #include <asm/freq.h> diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c index b1cb2715ad6e..67ee95603813 100644 --- a/arch/sh/drivers/dma/dma-sysfs.c +++ b/arch/sh/drivers/dma/dma-sysfs.c @@ -54,7 +54,7 @@ static int __init dma_subsys_init(void) if (unlikely(ret)) return ret; - return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr); + return device_create_file(dma_subsys.dev_root, &dev_attr_devices); } postcore_initcall(dma_subsys_init); diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 1a73c3e759a7..8bd965e00a15 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -52,25 +52,31 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); void *memory; if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) return memory; - if (!ops->alloc_coherent) + if (!ops->alloc) return NULL; - memory = ops->alloc_coherent(dev, size, dma_handle, gfp); + memory = ops->alloc(dev, size, dma_handle, gfp, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, memory); return memory; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -78,14 +84,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return; debug_dma_free_coherent(dev, size, vaddr, dma_handle); - if (ops->free_coherent) - ops->free_coherent(dev, size, vaddr, dma_handle); + if (ops->free) + ops->free(dev, size, vaddr, dma_handle, attrs); } /* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag); + dma_addr_t *dma_addr, gfp_t flag, + struct dma_attrs *attrs); extern void dma_generic_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs); #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c index 7f1b70cace35..f8f7af51c128 100644 --- a/arch/sh/kernel/cpu/fpu.c +++ b/arch/sh/kernel/cpu/fpu.c @@ -2,6 +2,7 @@ #include <linux/slab.h> #include <asm/processor.h> #include <asm/fpu.h> +#include <asm/traps.h> int init_fpu(struct task_struct *tsk) { diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c index 488d24e0cdf0..98bbaa447c93 100644 --- a/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/arch/sh/kernel/cpu/sh2a/fpu.c @@ -14,6 +14,7 @@ #include <asm/processor.h> #include <asm/io.h> #include <asm/fpu.h> +#include <asm/traps.h> /* The 
PR (precision) bit in the FP Status Register must be clear when * an frchg instruction is executed, otherwise the instruction is undefined. diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index e74cd6c0f10d..69ab4d3c8d41 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -16,6 +16,7 @@ #include <cpu/fpu.h> #include <asm/processor.h> #include <asm/fpu.h> +#include <asm/traps.h> /* The PR (precision) bit in the FP Status Register must be clear when * an frchg instruction is executed, otherwise the instruction is undefined. diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c index 5853989586ed..04ab5aeaf920 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c @@ -113,7 +113,7 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), /* MSTP32 clocks */ - CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]), + CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]), CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]), CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]), CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]), diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index a6f95ae4aae7..08d27fac8d08 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -16,6 +16,7 @@ #include <asm/suspend.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> +#include <asm/bl_bit.h> /* * Notifier lists for pre/post sleep notification diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 3c55b87f8b63..5b0bfcda6d0b 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -63,8 +63,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, #endif struct dma_map_ops nommu_dma_ops = { - .alloc_coherent = dma_generic_alloc_coherent, - .free_coherent = dma_generic_free_coherent, + .alloc = dma_generic_alloc_coherent, + .free = dma_generic_free_coherent, .map_page = nommu_map_page, .map_sg = nommu_map_sg, #ifdef CONFIG_DMA_NONCOHERENT diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 64852ecc6881..ee226e20c20c 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -17,8 +17,8 @@ #include <linux/irqflags.h> #include <linux/smp.h> #include <linux/cpuidle.h> -#include <asm/pgalloc.h> #include <linux/atomic.h> +#include <asm/pgalloc.h> #include <asm/smp.h> #include <asm/bl_bit.h> diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index efb6d398dec3..b117781bfea2 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -14,6 +14,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <asm/cacheflush.h> +#include <asm/traps.h> /* Macros for single step instruction identification */ #define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index f72e3a951588..94273aaf78c1 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -26,6 +26,7 @@ #include <asm/mmu_context.h> #include <asm/fpu.h> #include <asm/syscalls.h> +#include <asm/switch_to.h> void show_regs(struct pt_regs * regs) { diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index a17a14d32340..eaebdf6a5c77 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -27,6 +27,7 @@ #include <asm/smp.h> #include <asm/cacheflush.h> #include <asm/sections.h> +#include <asm/setup.h> int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_logical_map[NR_CPUS]; /* 
Map logical to physical */ diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S index 555a64f124ca..23af17584054 100644 --- a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S +++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S @@ -34,6 +34,41 @@ __kernel_rt_sigreturn: 1: .short __NR_rt_sigreturn .LEND_rt_sigreturn: .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn + .previous .section .eh_frame,"a",@progbits +.LCIE1: + .ualong .LCIE1_end - .LCIE1_start +.LCIE1_start: + .ualong 0 /* CIE ID */ + .byte 0x1 /* Version number */ + .string "zRS" /* NUL-terminated augmentation string */ + .uleb128 0x1 /* Code alignment factor */ + .sleb128 -4 /* Data alignment factor */ + .byte 0x11 /* Return address register column */ + .uleb128 0x1 /* Augmentation length and data */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ + .byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */ + + .align 2 +.LCIE1_end: + + .ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */ +.LFDE0_start: + .ualong .LFDE0_start-.LCIE1 /* CIE pointer */ + .ualong .LSTART_sigreturn-. /* PC-relative start address */ + .ualong .LEND_sigreturn-.LSTART_sigreturn + .uleb128 0 /* Augmentation */ + .align 2 +.LFDE0_end: + + .ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */ +.LFDE1_start: + .ualong .LFDE1_start-.LCIE1 /* CIE pointer */ + .ualong .LSTART_rt_sigreturn-. /* PC-relative start address */ + .ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn + .uleb128 0 /* Augmentation */ + .align 2 +.LFDE1_end: + .previous diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S index 3e70f851cdc6..0eb74d00690a 100644 --- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S +++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S @@ -3,37 +3,34 @@ .type __kernel_vsyscall,@function __kernel_vsyscall: .LSTART_vsyscall: - /* XXX: We'll have to do something here once we opt to use the vDSO - * page for something other than the signal trampoline.. as well as - * fill out .eh_frame -- PFM. */ + trapa #0x10 + nop .LEND_vsyscall: .size __kernel_vsyscall,.-.LSTART_vsyscall + .previous .section .eh_frame,"a",@progbits - .previous .LCIE: .ualong .LCIE_end - .LCIE_start .LCIE_start: .ualong 0 /* CIE ID */ .byte 0x1 /* Version number */ - .string "zRS" /* NUL-terminated augmentation string */ + .string "zR" /* NUL-terminated augmentation string */ .uleb128 0x1 /* Code alignment factor */ .sleb128 -4 /* Data alignment factor */ .byte 0x11 /* Return address register column */ - /* Augmentation length and data (none) */ - .byte 0xc /* DW_CFA_def_cfa */ - .uleb128 0xf /* r15 */ - .uleb128 0x0 /* offset 0 */ - + .uleb128 0x1 /* Augmentation length and data */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ + .byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */ .align 2 .LCIE_end: .ualong .LFDE_end-.LFDE_start /* Length FDE */ .LFDE_start: - .ualong .LCIE /* CIE pointer */ - .ualong .LSTART_vsyscall-. /* start address */ + .ualong .LFDE_start-.LCIE /* CIE pointer */ + .ualong .LSTART_vsyscall-. 
/* PC-relative start address */ .ualong .LEND_vsyscall-.LSTART_vsyscall - .uleb128 0 + .uleb128 0 /* Augmentation */ .align 2 .LFDE_end: .previous diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 112fea12522a..0e529285b28d 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -18,6 +18,7 @@ #include <linux/highmem.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> +#include <asm/cache_insns.h> #include <asm/cacheflush.h> /* diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index f251b5f27652..b81d9dbf9fef 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -33,7 +33,8 @@ static int __init dma_init(void) fs_initcall(dma_init); void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { void *ret, *ret_nocache; int order = get_order(size); @@ -64,7 +65,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, } void dma_generic_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { int order = get_order(size); unsigned long pfn = dma_handle >> PAGE_SHIFT; diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c index 75a17f5bfa14..0b85dd9dd3a7 100644 --- a/arch/sh/mm/flush-sh4.c +++ b/arch/sh/mm/flush-sh4.c @@ -1,5 +1,6 @@ #include <linux/mm.h> #include <asm/mmu_context.h> +#include <asm/cache_insns.h> #include <asm/cacheflush.h> #include <asm/traps.h> diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c index bc156ec4545e..2d8fa718d55e 100644 --- a/arch/sh/mm/sram.c +++ b/arch/sh/mm/sram.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/errno.h> #include <asm/sram.h> /* diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 8c0e4f7bb204..48a7c65731d2 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -26,24 +26,30 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include <asm-generic/dma-mapping-common.h> -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; - cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); return cpu_addr; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); } static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 6fa2f7980e6b..76e4a52aa85e 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ 
b/arch/sparc/include/asm/pgtable_64.h @@ -12,8 +12,6 @@ * the SpitFire page tables. */ -#include <asm-generic/pgtable-nopud.h> - #include <linux/compiler.h> #include <linux/const.h> #include <asm/types.h> @@ -22,6 +20,8 @@ #include <asm/page.h> #include <asm/processor.h> +#include <asm-generic/pgtable-nopud.h> + /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB). * The page copy blockops can use 0x6000000 to 0x8000000. * The TSB is mapped in the 0x8000000 to 0xa000000 range. diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 4643d68713fa..070ed141aac7 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -280,7 +280,8 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx) } static void *dma_4u_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addrp, gfp_t gfp) + dma_addr_t *dma_addrp, gfp_t gfp, + struct dma_attrs *attrs) { unsigned long flags, order, first_page; struct iommu *iommu; @@ -330,7 +331,8 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size, } static void dma_4u_free_coherent(struct device *dev, size_t size, - void *cpu, dma_addr_t dvma) + void *cpu, dma_addr_t dvma, + struct dma_attrs *attrs) { struct iommu *iommu; unsigned long flags, order, npages; @@ -825,8 +827,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, } static struct dma_map_ops sun4u_dma_ops = { - .alloc_coherent = dma_4u_alloc_coherent, - .free_coherent = dma_4u_free_coherent, + .alloc = dma_4u_alloc_coherent, + .free = dma_4u_free_coherent, .map_page = dma_4u_map_page, .unmap_page = dma_4u_unmap_page, .map_sg = dma_4u_map_sg, diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index d0479e2163fa..21bd73943f7f 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -261,7 +261,8 @@ EXPORT_SYMBOL(sbus_set_sbus64); * CPU may access them without any explicit flushing. */ static void *sbus_alloc_coherent(struct device *dev, size_t len, - dma_addr_t *dma_addrp, gfp_t gfp) + dma_addr_t *dma_addrp, gfp_t gfp, + struct dma_attrs *attrs) { struct platform_device *op = to_platform_device(dev); unsigned long len_total = PAGE_ALIGN(len); @@ -315,7 +316,7 @@ err_nopages: } static void sbus_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba) + dma_addr_t ba, struct dma_attrs *attrs) { struct resource *res; struct page *pgv; @@ -407,8 +408,8 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } struct dma_map_ops sbus_dma_ops = { - .alloc_coherent = sbus_alloc_coherent, - .free_coherent = sbus_free_coherent, + .alloc = sbus_alloc_coherent, + .free = sbus_free_coherent, .map_page = sbus_map_page, .unmap_page = sbus_unmap_page, .map_sg = sbus_map_sg, @@ -436,7 +437,8 @@ arch_initcall(sparc_register_ioport); * hwdev should be valid struct pci_dev pointer for PCI devices. */ static void *pci32_alloc_coherent(struct device *dev, size_t len, - dma_addr_t *pba, gfp_t gfp) + dma_addr_t *pba, gfp_t gfp, + struct dma_attrs *attrs) { unsigned long len_total = PAGE_ALIGN(len); void *va; @@ -489,7 +491,7 @@ err_nopages: * past this call are illegal. 
*/ static void pci32_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba) + dma_addr_t ba, struct dma_attrs *attrs) { struct resource *res; @@ -645,8 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * } struct dma_map_ops pci32_dma_ops = { - .alloc_coherent = pci32_alloc_coherent, - .free_coherent = pci32_free_coherent, + .alloc = pci32_alloc_coherent, + .free = pci32_free_coherent, .map_page = pci32_map_page, .unmap_page = pci32_unmap_page, .map_sg = pci32_map_sg, diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c index aba6b958b2a5..19f56058742b 100644 --- a/arch/sparc/kernel/leon_pci.c +++ b/arch/sparc/kernel/leon_pci.c @@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) void __devinit pcibios_fixup_bus(struct pci_bus *pbus) { - struct leon_pci_info *info = pbus->sysdata; struct pci_dev *dev; int i, has_io, has_mem; u16 cmd; @@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) return pci_enable_resources(dev, mask); } -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - /* - * Currently the OpenBoot nodes are not connected with the PCI device, - * this is because the LEON PROM does not create PCI nodes. Eventually - * this will change and the same approach as pcic.c can be used to - * match PROM nodes with pci devices. - */ - return NULL; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) { #ifdef CONFIG_PCI_DEBUG diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index af5755d20fbe..7661e84a05a0 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -128,7 +128,8 @@ static inline long iommu_batch_end(void) } static void *dma_4v_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addrp, gfp_t gfp) + dma_addr_t *dma_addrp, gfp_t gfp, + struct dma_attrs *attrs) { unsigned long flags, order, first_page, npages, n; struct iommu *iommu; @@ -198,7 +199,7 @@ range_alloc_fail: } static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, - dma_addr_t dvma) + dma_addr_t dvma, struct dma_attrs *attrs) { struct pci_pbm_info *pbm; struct iommu *iommu; @@ -527,8 +528,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, } static struct dma_map_ops sun4v_dma_ops = { - .alloc_coherent = dma_4v_alloc_coherent, - .free_coherent = dma_4v_free_coherent, + .alloc = dma_4v_alloc_coherent, + .free = dma_4v_free_coherent, .map_page = dma_4v_map_page, .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 7705c6731e28..df3155a17991 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsigned long g2; int from_user = !(regs->psr & PSR_PS); int fault, code; + unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + (write ? FAULT_FLAG_WRITE : 0)); if(text_fault) address = regs->pc; @@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +retry: down_read(&mm->mmap_sem); /* @@ -289,7 +292,11 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write ? 
FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -297,13 +304,29 @@ good_area: goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) { + current->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, + 1, regs, address); + } else { + current->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, + 1, regs, address); + } + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } } + up_read(&mm->mmap_sem); return; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 504c0622f729..1fe0429b6314 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) unsigned int insn = 0; int si_code, fault_code, fault; unsigned long address, mm_rss; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; fault_code = get_thread_fault_code(); @@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) insn = get_fault_insn(regs, insn); goto handle_kernel_fault; } + +retry: down_read(&mm->mmap_sem); } @@ -423,7 +426,12 @@ good_area: goto bad_area; } - fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0); + flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -431,12 +439,27 @@ good_area: goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) { + current->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, + 1, regs, address); + } else { + current->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, + 1, regs, address); + } + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } } up_read(&mm->mmap_sem); diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 11270ca22c0a..96033e2d6845 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -12,7 +12,7 @@ config TILE select GENERIC_PENDING_IRQ if SMP select GENERIC_IRQ_SHOW select SYS_HYPERVISOR - select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386 + select ARCH_HAVE_NMI_SAFE_CMPXCHG # FIXME: investigate whether we need/want these options. 
# select HAVE_IOREMAP_PROT @@ -69,6 +69,9 @@ config ARCH_PHYS_ADDR_T_64BIT config ARCH_DMA_ADDR_T_64BIT def_bool y +config NEED_DMA_MAP_STATE + def_bool y + config LOCKDEP_SUPPORT def_bool y @@ -118,7 +121,7 @@ config 64BIT config ARCH_DEFCONFIG string - default "arch/tile/configs/tile_defconfig" if !TILEGX + default "arch/tile/configs/tilepro_defconfig" if !TILEGX default "arch/tile/configs/tilegx_defconfig" if TILEGX source "init/Kconfig" @@ -240,6 +243,7 @@ endchoice config PAGE_OFFSET hex + depends on !64BIT default 0xF0000000 if VMSPLIT_3_75G default 0xE0000000 if VMSPLIT_3_5G default 0xB0000000 if VMSPLIT_2_75G diff --git a/arch/tile/Makefile b/arch/tile/Makefile index 17acce70569b..9520bc5a4b7f 100644 --- a/arch/tile/Makefile +++ b/arch/tile/Makefile @@ -30,7 +30,8 @@ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"") KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) endif -LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) +LIBGCC_PATH := \ + $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) # Provide the path to use for "make defconfig". KBUILD_DEFCONFIG := $(ARCH)_defconfig @@ -53,8 +54,6 @@ libs-y += $(LIBGCC_PATH) # See arch/tile/Kbuild for content of core part of the kernel core-y += arch/tile/ -core-$(CONFIG_KVM) += arch/tile/kvm/ - ifdef TILERA_ROOT INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot endif diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h index f548efeb2de3..d6ba449b5363 100644 --- a/arch/tile/include/arch/spr_def.h +++ b/arch/tile/include/arch/spr_def.h @@ -60,8 +60,8 @@ _concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,) #define SPR_IPI_EVENT_RESET_K \ _concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,) -#define SPR_IPI_MASK_SET_K \ - _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,) +#define SPR_IPI_EVENT_SET_K \ + _concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,) #define INT_IPI_K \ _concat4(INT_IPI_, CONFIG_KERNEL_PL,,) diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h index bb696da5d7cd..f2461429a4a4 100644 --- a/arch/tile/include/asm/atomic.h +++ b/arch/tile/include/asm/atomic.h @@ -17,6 +17,8 @@ #ifndef _ASM_TILE_ATOMIC_H #define _ASM_TILE_ATOMIC_H +#include <asm/cmpxchg.h> + #ifndef __ASSEMBLY__ #include <linux/compiler.h> @@ -121,54 +123,6 @@ static inline int atomic_read(const atomic_t *v) */ #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) -/* Nonexistent functions intended to cause link errors. 
*/ -extern unsigned long __xchg_called_with_bad_pointer(void); -extern unsigned long __cmpxchg_called_with_bad_pointer(void); - -#define xchg(ptr, x) \ - ({ \ - typeof(*(ptr)) __x; \ - switch (sizeof(*(ptr))) { \ - case 4: \ - __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \ - (atomic_t *)(ptr), \ - (u32)(typeof((x)-(x)))(x)); \ - break; \ - case 8: \ - __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \ - (atomic64_t *)(ptr), \ - (u64)(typeof((x)-(x)))(x)); \ - break; \ - default: \ - __xchg_called_with_bad_pointer(); \ - } \ - __x; \ - }) - -#define cmpxchg(ptr, o, n) \ - ({ \ - typeof(*(ptr)) __x; \ - switch (sizeof(*(ptr))) { \ - case 4: \ - __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \ - (atomic_t *)(ptr), \ - (u32)(typeof((o)-(o)))(o), \ - (u32)(typeof((n)-(n)))(n)); \ - break; \ - case 8: \ - __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \ - (atomic64_t *)(ptr), \ - (u64)(typeof((o)-(o)))(o), \ - (u64)(typeof((n)-(n)))(n)); \ - break; \ - default: \ - __cmpxchg_called_with_bad_pointer(); \ - } \ - __x; \ - }) - -#define tas(ptr) (xchg((ptr), 1)) - #endif /* __ASSEMBLY__ */ #ifndef __tilegx__ diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 466dc4a39a4f..54d1da826f93 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -200,7 +200,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as @v was not already @u. - * Returns the old value of @v. + * Returns non-zero if @v was not @u, and zero otherwise. */ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) { diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h index 58d021a9834f..60b87ee54fb8 100644 --- a/arch/tile/include/asm/bitops_64.h +++ b/arch/tile/include/asm/bitops_64.h @@ -38,10 +38,10 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr) static inline void change_bit(unsigned nr, volatile unsigned long *addr) { - unsigned long old, mask = (1UL << (nr % BITS_PER_LONG)); - long guess, oldval; + unsigned long mask = (1UL << (nr % BITS_PER_LONG)); + unsigned long guess, oldval; addr += nr / BITS_PER_LONG; - old = *addr; + oldval = *addr; do { guess = oldval; oldval = atomic64_cmpxchg((atomic64_t *)addr, @@ -85,7 +85,7 @@ static inline int test_and_change_bit(unsigned nr, volatile unsigned long *addr) { unsigned long mask = (1UL << (nr % BITS_PER_LONG)); - long guess, oldval = *addr; + unsigned long guess, oldval; addr += nr / BITS_PER_LONG; oldval = *addr; do { diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h new file mode 100644 index 000000000000..276f067e3640 --- /dev/null +++ b/arch/tile/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* + * cmpxchg.h -- forked from asm/atomic.h with this copyright: + * + * Copyright 2010 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ * + */ + +#ifndef _ASM_TILE_CMPXCHG_H +#define _ASM_TILE_CMPXCHG_H + +#ifndef __ASSEMBLY__ + +/* Nonexistent functions intended to cause link errors. */ +extern unsigned long __xchg_called_with_bad_pointer(void); +extern unsigned long __cmpxchg_called_with_bad_pointer(void); + +#define xchg(ptr, x) \ + ({ \ + typeof(*(ptr)) __x; \ + switch (sizeof(*(ptr))) { \ + case 4: \ + __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \ + (atomic_t *)(ptr), \ + (u32)(typeof((x)-(x)))(x)); \ + break; \ + case 8: \ + __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \ + (atomic64_t *)(ptr), \ + (u64)(typeof((x)-(x)))(x)); \ + break; \ + default: \ + __xchg_called_with_bad_pointer(); \ + } \ + __x; \ + }) + +#define cmpxchg(ptr, o, n) \ + ({ \ + typeof(*(ptr)) __x; \ + switch (sizeof(*(ptr))) { \ + case 4: \ + __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \ + (atomic_t *)(ptr), \ + (u32)(typeof((o)-(o)))(o), \ + (u32)(typeof((n)-(n)))(n)); \ + break; \ + case 8: \ + __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \ + (atomic64_t *)(ptr), \ + (u64)(typeof((o)-(o)))(o), \ + (u64)(typeof((n)-(n)))(n)); \ + break; \ + default: \ + __cmpxchg_called_with_bad_pointer(); \ + } \ + __x; \ + }) + +#define tas(ptr) (xchg((ptr), 1)) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_TILE_CMPXCHG_H */ diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h index f80f8ceabc67..33cff9a3058b 100644 --- a/arch/tile/include/asm/irq.h +++ b/arch/tile/include/asm/irq.h @@ -21,7 +21,7 @@ #define NR_IRQS 32 /* IRQ numbers used for linux IPIs. */ -#define IRQ_RESCHEDULE 1 +#define IRQ_RESCHEDULE 0 #define irq_canonicalize(irq) (irq) diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h index 72be5904e020..5f8b6a095fd8 100644 --- a/arch/tile/include/asm/spinlock_64.h +++ b/arch/tile/include/asm/spinlock_64.h @@ -137,7 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) static inline void arch_write_unlock(arch_rwlock_t *rw) { __insn_mf(); - rw->lock = 0; + __insn_exch4(&rw->lock, 0); /* Avoid waiting in the write buffer. */ } static inline int arch_read_trylock(arch_rwlock_t *rw) diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h index 4d97a2db932e..0e9d382a2d45 100644 --- a/arch/tile/include/asm/stack.h +++ b/arch/tile/include/asm/stack.h @@ -25,7 +25,6 @@ struct KBacktraceIterator { BacktraceIterator it; struct task_struct *task; /* task we are backtracing */ - pte_t *pgtable; /* page table for user space access */ int end; /* iteration complete. */ int new_context; /* new context is starting */ int profile; /* profiling, so stop on async intrpt */ diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h index 5f20f920f932..e28c3df4176a 100644 --- a/arch/tile/include/asm/traps.h +++ b/arch/tile/include/asm/traps.h @@ -64,7 +64,11 @@ void do_breakpoint(struct pt_regs *, int fault_num); #ifdef __tilegx__ +/* kernel/single_step.c */ void gx_singlestep_handle(struct pt_regs *, int fault_num); + +/* kernel/intvec_64.S */ +void fill_ra_stack(void); #endif -#endif /* _ASM_TILE_SYSCALLS_H */ +#endif /* _ASM_TILE_TRAPS_H */ diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S index 431e9ae60488..ec91568df880 100644 --- a/arch/tile/kernel/entry.S +++ b/arch/tile/kernel/entry.S @@ -85,6 +85,7 @@ STD_ENTRY(cpu_idle_on_new_stack) /* Loop forever on a nap during SMP boot. 
*/ STD_ENTRY(smp_nap) nap + nop /* avoid provoking the icache prefetch with a jump */ j smp_nap /* we are not architecturally guaranteed not to exit nap */ jrp lr /* clue in the backtracer */ STD_ENDPROC(smp_nap) @@ -105,5 +106,6 @@ STD_ENTRY(_cpu_idle) .global _cpu_idle_nap _cpu_idle_nap: nap + nop /* avoid provoking the icache prefetch with a jump */ jrp lr STD_ENDPROC(_cpu_idle) diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index aecc8ed5f39b..5d56a1ef5ba5 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -799,6 +799,10 @@ handle_interrupt: * This routine takes a boolean in r30 indicating if this is an NMI. * If so, we also expect a boolean in r31 indicating whether to * re-enable the oprofile interrupts. + * + * Note that .Lresume_userspace is jumped to directly in several + * places, and we need to make sure r30 is set correctly in those + * callers as well. */ STD_ENTRY(interrupt_return) /* If we're resuming to kernel space, don't check thread flags. */ @@ -1237,7 +1241,10 @@ handle_syscall: bzt r30, 1f jal do_syscall_trace FEEDBACK_REENTER(handle_syscall) -1: j .Lresume_userspace /* jump into middle of interrupt_return */ +1: { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } .Linvalid_syscall: /* Report an invalid syscall back to the user program */ @@ -1246,7 +1253,10 @@ handle_syscall: movei r28, -ENOSYS } sw r29, r28 - j .Lresume_userspace /* jump into middle of interrupt_return */ + { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } STD_ENDPROC(handle_syscall) /* Return the address for oprofile to suppress in backtraces. */ @@ -1262,7 +1272,10 @@ STD_ENTRY(ret_from_fork) jal sim_notify_fork jal schedule_tail FEEDBACK_REENTER(ret_from_fork) - j .Lresume_userspace /* jump into middle of interrupt_return */ + { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } STD_ENDPROC(ret_from_fork) /* @@ -1376,7 +1389,10 @@ handle_ill: jal send_sigtrap /* issue a SIGTRAP */ FEEDBACK_REENTER(handle_ill) - j .Lresume_userspace /* jump into middle of interrupt_return */ + { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } .Ldispatch_normal_ill: { diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 79c93e10ba27..49d9d6621682 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -22,6 +22,7 @@ #include <asm/irqflags.h> #include <asm/asm-offsets.h> #include <asm/types.h> +#include <asm/signal.h> #include <hv/hypervisor.h> #include <arch/abi.h> #include <arch/interrupts.h> @@ -605,6 +606,10 @@ handle_interrupt: * This routine takes a boolean in r30 indicating if this is an NMI. * If so, we also expect a boolean in r31 indicating whether to * re-enable the oprofile interrupts. + * + * Note that .Lresume_userspace is jumped to directly in several + * places, and we need to make sure r30 is set correctly in those + * callers as well. */ STD_ENTRY(interrupt_return) /* If we're resuming to kernel space, don't check thread flags. */ @@ -1039,11 +1044,28 @@ handle_syscall: /* Do syscall trace again, if requested. 
*/ ld r30, r31 - andi r30, r30, _TIF_SYSCALL_TRACE - beqzt r30, 1f + andi r0, r30, _TIF_SYSCALL_TRACE + { + andi r0, r30, _TIF_SINGLESTEP + beqzt r0, 1f + } jal do_syscall_trace FEEDBACK_REENTER(handle_syscall) -1: j .Lresume_userspace /* jump into middle of interrupt_return */ + andi r0, r30, _TIF_SINGLESTEP + +1: beqzt r0, 2f + + /* Single stepping -- notify ptrace. */ + { + movei r0, SIGTRAP + jal ptrace_notify + } + FEEDBACK_REENTER(handle_syscall) + +2: { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } .Lcompat_syscall: /* @@ -1077,7 +1099,10 @@ handle_syscall: movei r28, -ENOSYS } st r29, r28 - j .Lresume_userspace /* jump into middle of interrupt_return */ + { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } STD_ENDPROC(handle_syscall) /* Return the address for oprofile to suppress in backtraces. */ @@ -1093,7 +1118,10 @@ STD_ENTRY(ret_from_fork) jal sim_notify_fork jal schedule_tail FEEDBACK_REENTER(ret_from_fork) - j .Lresume_userspace + { + movei r30, 0 /* not an NMI */ + j .Lresume_userspace /* jump into middle of interrupt_return */ + } STD_ENDPROC(ret_from_fork) /* Various stub interrupt handlers and syscall handlers */ @@ -1156,6 +1184,18 @@ int_unalign: push_extra_callee_saves r0 j do_trap +/* Fill the return address stack with nonzero entries. */ +STD_ENTRY(fill_ra_stack) + { + move r0, lr + jal 1f + } +1: jal 2f +2: jal 3f +3: jal 4f +4: jrp r0 + STD_ENDPROC(fill_ra_stack) + /* Include .intrpt1 array of interrupt vectors */ .section ".intrpt1", "ax" @@ -1166,7 +1206,7 @@ int_unalign: #define do_hardwall_trap bad_intr #endif - int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr + int_hand INT_MEM_ERROR, MEM_ERROR, do_trap int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr #if CONFIG_KERNEL_PL == 2 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c index b90ab9925674..98d476920106 100644 --- a/arch/tile/kernel/module.c +++ b/arch/tile/kernel/module.c @@ -67,6 +67,8 @@ void *module_alloc(unsigned long size) area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); if (!area) goto error; + area->nr_pages = npages; + area->pages = pages; if (map_vm_area(area, prot_rwx, &pages)) { vunmap(area->addr); diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c index 7a9327046404..446a7f52cc11 100644 --- a/arch/tile/kernel/proc.c +++ b/arch/tile/kernel/proc.c @@ -146,7 +146,6 @@ static ctl_table unaligned_table[] = { }, {} }; -#endif static struct ctl_path tile_path[] = { { .procname = "tile" }, @@ -155,10 +154,9 @@ static struct ctl_path tile_path[] = { static int __init proc_sys_tile_init(void) { -#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */ register_sysctl_paths(tile_path, unaligned_table); -#endif return 0; } arch_initcall(proc_sys_tile_init); +#endif diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 30caecac94dc..2d5ef617bb39 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -28,6 +28,7 @@ #include <linux/tracehook.h> #include <linux/signal.h> #include <asm/stack.h> +#include <asm/switch_to.h> #include <asm/homecache.h> #include <asm/syscalls.h> #include <asm/traps.h> @@ -285,7 +286,7 @@ struct task_struct *validate_current(void) static struct task_struct corrupt = { .comm = "<corrupt>" }; struct task_struct *tsk = current; if (unlikely((unsigned long)tsk < PAGE_OFFSET || - (void *)tsk > high_memory || + 
(high_memory && (void *)tsk > high_memory) || ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); tsk = &corrupt; diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 92a94f4920ad..bff23f476110 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -103,13 +103,11 @@ unsigned long __initdata pci_reserve_end_pfn = -1U; static int __init setup_maxmem(char *str) { - long maxmem_mb; - if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 || - maxmem_mb == 0) + unsigned long long maxmem; + if (str == NULL || (maxmem = memparse(str, NULL)) == 0) return -EINVAL; - maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) << - (HPAGE_SHIFT - PAGE_SHIFT); + maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used to no more than %dMB\n", maxmem_pfn >> (20 - PAGE_SHIFT)); return 0; @@ -119,14 +117,15 @@ early_param("maxmem", setup_maxmem); static int __init setup_maxnodemem(char *str) { char *endp; - long maxnodemem_mb, node; + unsigned long long maxnodemem; + long node; node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; - if (node >= MAX_NUMNODES || *endp != ':' || - strict_strtol(endp+1, 0, &maxnodemem_mb) != 0) + if (node >= MAX_NUMNODES || *endp != ':') return -EINVAL; - maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) << + maxnodemem = memparse(endp+1, NULL); + maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used on node %ld to no more than %dMB\n", node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); @@ -913,6 +912,13 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD +/* + * Note that the kernel can potentially support other compression + * techniques than gz, though we don't do so by default. If we ever + * decide to do so we can either look for other filename extensions, + * or just allow a file with this name to be compressed with an + * arbitrary compressor (somewhat counterintuitively). + */ static int __initdata set_initramfs_file; static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; @@ -928,9 +934,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for an additional "initramfs.cpio.gz" file in the hvfs. + * We look for an "initramfs.cpio.gz" file in the hvfs. * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs after any built-in initramfs_data. + * unpacked to the initramfs. */ static void __init load_hv_initrd(void) { diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index bc1eb586e24d..9efbc1391b3c 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c @@ -153,6 +153,25 @@ static tile_bundle_bits rewrite_load_store_unaligned( if (((unsigned long)addr % size) == 0) return bundle; + /* + * Return SIGBUS with the unaligned address, if requested. + * Note that we return SIGBUS even for completely invalid addresses + * as long as they are in fact unaligned; this matches what the + * tilepro hardware would be doing, if it could provide us with the + * actual bad address in an SPR, which it doesn't. 
+ */ + if (unaligned_fixup == 0) { + siginfo_t info = { + .si_signo = SIGBUS, + .si_code = BUS_ADRALN, + .si_addr = addr + }; + trace_unhandled_signal("unaligned trap", regs, + (unsigned long)addr, SIGBUS); + force_sig_info(info.si_signo, &info, current); + return (tilepro_bundle_bits) 0; + } + #ifndef __LITTLE_ENDIAN # error We assume little-endian representation with copy_xx_user size 2 here #endif @@ -192,18 +211,6 @@ static tile_bundle_bits rewrite_load_store_unaligned( return (tile_bundle_bits) 0; } - if (unaligned_fixup == 0) { - siginfo_t info = { - .si_signo = SIGBUS, - .si_code = BUS_ADRALN, - .si_addr = addr - }; - trace_unhandled_signal("unaligned trap", regs, - (unsigned long)addr, SIGBUS); - force_sig_info(info.si_signo, &info, current); - return (tile_bundle_bits) 0; - } - if (unaligned_printk || unaligned_fixup_count == 0) { pr_info("Process %d/%s: PC %#lx: Fixup of" " unaligned %s at %#lx.\n", diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c index a44e103c5a63..91da0f721958 100644 --- a/arch/tile/kernel/smp.c +++ b/arch/tile/kernel/smp.c @@ -103,7 +103,7 @@ static void smp_stop_cpu_interrupt(void) set_cpu_online(smp_processor_id(), 0); arch_local_irq_disable_all(); for (;;) - asm("nap"); + asm("nap; nop"); } /* This function calls the 'stop' function on all other CPUs in the system. */ @@ -113,6 +113,12 @@ void smp_send_stop(void) send_IPI_allbutself(MSG_TAG_STOP_CPU); } +/* On panic, just wait; we may get an smp_send_stop() later on. */ +void panic_smp_self_stop(void) +{ + while (1) + asm("nap; nop"); +} /* * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index b949edcec200..172aef7d3159 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c @@ -196,6 +196,8 @@ void __cpuinit online_secondary(void) /* This must be done before setting cpu_online_mask */ wmb(); + notify_cpu_starting(smp_processor_id()); + /* * We need to hold call_lock, so there is no inconsistency * between the time smp_call_function() determines number of diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 37ee4d037e0b..b2f44c28dda6 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -21,10 +21,12 @@ #include <linux/stacktrace.h> #include <linux/uaccess.h> #include <linux/mmzone.h> +#include <linux/dcache.h> +#include <linux/fs.h> #include <asm/backtrace.h> #include <asm/page.h> -#include <asm/tlbflush.h> #include <asm/ucontext.h> +#include <asm/switch_to.h> #include <asm/sigframe.h> #include <asm/stack.h> #include <arch/abi.h> @@ -44,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp) return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; } -/* Is address valid for reading? */ -static int valid_address(struct KBacktraceIterator *kbt, unsigned long address) -{ - HV_PTE *l1_pgtable = kbt->pgtable; - HV_PTE *l2_pgtable; - unsigned long pfn; - HV_PTE pte; - struct page *page; - - if (l1_pgtable == NULL) - return 0; /* can't read user space in other tasks */ - -#ifdef CONFIG_64BIT - /* Find the real l1_pgtable by looking in the l0_pgtable. 
*/ - pte = l1_pgtable[HV_L0_INDEX(address)]; - if (!hv_pte_get_present(pte)) - return 0; - pfn = hv_pte_get_pfn(pte); - if (pte_huge(pte)) { - if (!pfn_valid(pfn)) { - pr_err("L0 huge page has bad pfn %#lx\n", pfn); - return 0; - } - return hv_pte_get_present(pte) && hv_pte_get_readable(pte); - } - page = pfn_to_page(pfn); - BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */ - l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); -#endif - pte = l1_pgtable[HV_L1_INDEX(address)]; - if (!hv_pte_get_present(pte)) - return 0; - pfn = hv_pte_get_pfn(pte); - if (pte_huge(pte)) { - if (!pfn_valid(pfn)) { - pr_err("huge page has bad pfn %#lx\n", pfn); - return 0; - } - return hv_pte_get_present(pte) && hv_pte_get_readable(pte); - } - - page = pfn_to_page(pfn); - if (PageHighMem(page)) { - pr_err("L2 page table not in LOWMEM (%#llx)\n", - HV_PFN_TO_CPA(pfn)); - return 0; - } - l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); - pte = l2_pgtable[HV_L2_INDEX(address)]; - return hv_pte_get_present(pte) && hv_pte_get_readable(pte); -} - /* Callback for backtracer; basically a glorified memcpy */ static bool read_memory_func(void *result, unsigned long address, unsigned int size, void *vkbt) { int retval; struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; + + if (address == 0) + return 0; if (__kernel_text_address(address)) { /* OK to read kernel code. */ } else if (address >= PAGE_OFFSET) { /* We only tolerate kernel-space reads of this task's stack */ if (!in_kernel_stack(kbt, address)) return 0; - } else if (!valid_address(kbt, address)) { - return 0; /* invalid user-space address */ + } else if (!kbt->is_current) { + return 0; /* can't read from other user address spaces */ } pagefault_disable(); retval = __copy_from_user_inatomic(result, @@ -127,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) unsigned long sp = kbt->it.sp; struct pt_regs *p; + if (sp % sizeof(long) != 0) + return NULL; if (!in_kernel_stack(kbt, sp)) return NULL; if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) @@ -169,27 +124,27 @@ static int is_sigreturn(unsigned long pc) } /* Return a pt_regs pointer for a valid signal handler frame */ -static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) +static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt, + struct rt_sigframe* kframe) { BacktraceIterator *b = &kbt->it; - if (b->pc == VDSO_BASE) { - struct rt_sigframe *frame; - unsigned long sigframe_top = - b->sp + sizeof(struct rt_sigframe) - 1; - if (!valid_address(kbt, b->sp) || - !valid_address(kbt, sigframe_top)) { - if (kbt->verbose) - pr_err(" (odd signal: sp %#lx?)\n", - (unsigned long)(b->sp)); + if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET && + b->sp % sizeof(long) == 0) { + int retval; + pagefault_disable(); + retval = __copy_from_user_inatomic( + kframe, (void __user __force *)b->sp, + sizeof(*kframe)); + pagefault_enable(); + if (retval != 0 || + (unsigned int)(kframe->info.si_signo) >= _NSIG) return NULL; - } - frame = (struct rt_sigframe *)b->sp; if (kbt->verbose) { pr_err(" <received signal %d>\n", - frame->info.si_signo); + kframe->info.si_signo); } - return (struct pt_regs *)&frame->uc.uc_mcontext; + return (struct pt_regs *)&kframe->uc.uc_mcontext; } return NULL; } @@ -202,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) { struct pt_regs *p; + struct rt_sigframe kframe; p = valid_fault_handler(kbt); if (p == NULL) - p = 
valid_sigframe(kbt); + p = valid_sigframe(kbt, &kframe); if (p == NULL) return 0; backtrace_init(&kbt->it, read_memory_func, kbt, @@ -265,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt, /* * Set up callback information. We grab the kernel stack base - * so we will allow reads of that address range, and if we're - * asking about the current process we grab the page table - * so we can check user accesses before trying to read them. - * We flush the TLB to avoid any weird skew issues. + * so we will allow reads of that address range. */ - is_current = (t == NULL); + is_current = (t == NULL || t == current); kbt->is_current = is_current; if (is_current) t = validate_current(); kbt->task = t; - kbt->pgtable = NULL; kbt->verbose = 0; /* override in caller if desired */ kbt->profile = 0; /* override in caller if desired */ kbt->end = KBT_ONGOING; - kbt->new_context = 0; - if (is_current) { - HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; - if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) { - /* - * Not just an optimization: this also allows - * this to work at all before va/pa mappings - * are set up. - */ - kbt->pgtable = swapper_pg_dir; - } else { - struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa)); - if (!PageHighMem(page)) - kbt->pgtable = __va(pgdir_pa); - else - pr_err("page table not in LOWMEM" - " (%#llx)\n", pgdir_pa); - } - local_flush_tlb_all(); + kbt->new_context = 1; + if (is_current) validate_stack(regs); - } if (regs == NULL) { if (is_current || t->state == TASK_RUNNING) { @@ -345,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt) } EXPORT_SYMBOL(KBacktraceIterator_next); +static void describe_addr(struct KBacktraceIterator *kbt, + unsigned long address, + int have_mmap_sem, char *buf, size_t bufsize) +{ + struct vm_area_struct *vma; + size_t namelen, remaining; + unsigned long size, offset, adjust; + char *p, *modname; + const char *name; + int rc; + + /* + * Look one byte back for every caller frame (i.e. those that + * aren't a new context) so we look up symbol data for the + * call itself, not the following instruction, which may be on + * a different line (or in a different function). + */ + adjust = !kbt->new_context; + address -= adjust; + + if (address >= PAGE_OFFSET) { + /* Handle kernel symbols. */ + BUG_ON(bufsize < KSYM_NAME_LEN); + name = kallsyms_lookup(address, &size, &offset, + &modname, buf); + if (name == NULL) { + buf[0] = '\0'; + return; + } + namelen = strlen(buf); + remaining = (bufsize - 1) - namelen; + p = buf + namelen; + rc = snprintf(p, remaining, "+%#lx/%#lx ", + offset + adjust, size); + if (modname && rc < remaining) + snprintf(p + rc, remaining - rc, "[%s] ", modname); + buf[bufsize-1] = '\0'; + return; + } + + /* If we don't have the mmap_sem, we can't show any more info. */ + buf[0] = '\0'; + if (!have_mmap_sem) + return; + + /* Find vma info. */ + vma = find_vma(kbt->task->mm, address); + if (vma == NULL || address < vma->vm_start) { + snprintf(buf, bufsize, "[unmapped address] "); + return; + } + + if (vma->vm_file) { + char *s; + p = d_path(&vma->vm_file->f_path, buf, bufsize); + if (IS_ERR(p)) + p = "?"; + s = strrchr(p, '/'); + if (s) + p = s+1; + } else { + p = "anon"; + } + + /* Generate a string description of the vma info. 
*/ + namelen = strlen(p); + remaining = (bufsize - 1) - namelen; + memmove(buf, p, namelen); + snprintf(buf + namelen, remaining, "[%lx+%lx] ", + vma->vm_start, vma->vm_end - vma->vm_start); +} + /* * This method wraps the backtracer's more generic support. * It is only invoked from the architecture-specific code; show_stack() @@ -353,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next); void tile_show_stack(struct KBacktraceIterator *kbt, int headers) { int i; + int have_mmap_sem = 0; if (headers) { /* @@ -369,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) kbt->verbose = 1; i = 0; for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { - char *modname; - const char *name; - unsigned long address = kbt->it.pc; - unsigned long offset, size; char namebuf[KSYM_NAME_LEN+100]; + unsigned long address = kbt->it.pc; - if (address >= PAGE_OFFSET) - name = kallsyms_lookup(address, &size, &offset, - &modname, namebuf); - else - name = NULL; - - if (!name) - namebuf[0] = '\0'; - else { - size_t namelen = strlen(namebuf); - size_t remaining = (sizeof(namebuf) - 1) - namelen; - char *p = namebuf + namelen; - int rc = snprintf(p, remaining, "+%#lx/%#lx ", - offset, size); - if (modname && rc < remaining) - snprintf(p + rc, remaining - rc, - "[%s] ", modname); - namebuf[sizeof(namebuf)-1] = '\0'; - } + /* Try to acquire the mmap_sem as we pass into userspace. */ + if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm) + have_mmap_sem = + down_read_trylock(&kbt->task->mm->mmap_sem); + + describe_addr(kbt, address, have_mmap_sem, + namebuf, sizeof(namebuf)); pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n", i++, address, namebuf, (unsigned long)(kbt->it.sp)); @@ -408,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) pr_err("Stack dump stopped; next frame identical to this one\n"); if (headers) pr_err("Stack dump complete\n"); + if (have_mmap_sem) + up_read(&kbt->task->mm->mmap_sem); } EXPORT_SYMBOL(tile_show_stack); diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 2bb6602a1ee7..73cff814ac57 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -200,7 +200,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, { siginfo_t info = { 0 }; int signo, code; - unsigned long address; + unsigned long address = 0; bundle_bits instr; /* Re-enable interrupts. */ @@ -223,6 +223,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, } switch (fault_num) { + case INT_MEM_ERROR: + signo = SIGBUS; + code = BUS_OBJERR; + break; case INT_ILL: if (copy_from_user(&instr, (void __user *)regs->pc, sizeof(instr))) { @@ -289,7 +293,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, address = regs->pc; break; #ifdef __tilegx__ - case INT_ILL_TRANS: + case INT_ILL_TRANS: { + /* Avoid a hardware erratum with the return address stack. 
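The mmap_sem handling added to tile_show_stack() is a take-on-demand pattern; a minimal sketch of the flow, with the frame iteration reduced to placeholders:

	int have_mmap_sem = 0;

	for (; !done(kbt); next_frame(kbt)) {		/* placeholder loop */
		unsigned long pc = kbt->it.pc;

		/* Grab the semaphore only once a user-space PC shows up,
		 * and only if the task still has an mm to look at. */
		if (pc < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);

		/* ... describe the frame; vma lookups only if have_mmap_sem ... */
	}
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
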
*/ + fill_ra_stack(); + signo = SIGSEGV; code = SEGV_MAPERR; if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK) @@ -297,6 +304,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, else address = 0; /* FIXME: GX: single-step for address */ break; + } #endif default: panic("Unexpected do_trap interrupt number %d", fault_num); @@ -308,7 +316,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, info.si_addr = (void __user *)address; if (signo == SIGILL) info.si_trapno = fault_num; - trace_unhandled_signal("trap", regs, address, signo); + if (signo != SIGTRAP) + trace_unhandled_signal("trap", regs, address, signo); force_sig_info(signo, &info, current); } diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile index 0c26086ecbef..985f59858234 100644 --- a/arch/tile/lib/Makefile +++ b/arch/tile/lib/Makefile @@ -7,6 +7,7 @@ lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \ strchr_$(BITS).o strlen_$(BITS).o ifeq ($(CONFIG_TILEGX),y) +CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer lib-y += memcpy_user_64.o else lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c index 8928aace7a64..db4fb89e12d8 100644 --- a/arch/tile/lib/cacheflush.c +++ b/arch/tile/lib/cacheflush.c @@ -39,7 +39,21 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) { char *p, *base; size_t step_size, load_count; + + /* + * On TILEPro the striping granularity is a fixed 8KB; on + * TILE-Gx it is configurable, and we rely on the fact that + * the hypervisor always configures maximum striping, so that + * bits 9 and 10 of the PA are part of the stripe function, so + * every 512 bytes we hit a striping boundary. + * + */ +#ifdef __tilegx__ + const unsigned long STRIPE_WIDTH = 512; +#else const unsigned long STRIPE_WIDTH = 8192; +#endif + #ifdef __tilegx__ /* * On TILE-Gx, we must disable the dstream prefetcher before doing @@ -74,7 +88,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) * memory, that one load would be sufficient, but since we may * be, we also need to back up to the last load issued to * another memory controller, which would be the point where - * we crossed an 8KB boundary (the granularity of striping + * we crossed a "striping" boundary (the granularity of striping * across memory controllers). Keep backing up and doing this * until we are before the beginning of the buffer, or have * hit all the controllers. @@ -88,12 +102,22 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) * every cache line on a full memory stripe on each * controller" that we simply do that, to simplify the logic. * - * FIXME: See bug 9535 for some issues with this code. + * On TILE-Gx the hash-for-home function is much more complex, + * with the upshot being we can't readily guarantee we have + * hit both entries in the 128-entry AMT that were hit by any + * load in the entire range, so we just re-load them all. + * With larger buffers, we may want to consider using a hypervisor + * trap to issue loads directly to each hash-for-home tile for + * each controller (doing it from Linux would trash the TLB). 
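To make the back-off arithmetic in finv_buffer_remote() concrete, here is what the constants in the surrounding hunks work out to under illustrative assumptions (64-byte L2 lines and four memory controllers, i.e. CHIP_LOG_NUM_MSHIMS() == 2, are assumptions for the example):

	if (hfh) {
#ifdef __tilegx__
		step_size  = 64;			/* L2_CACHE_BYTES */
		load_count = DIV_ROUND_UP(size, 64);	/* touch every line */
#else
		step_size  = 64;
		load_count = (8192 / 64) * 4;		/* 512 loads: one full
							   stripe per controller */
#endif
	} else {
		step_size  = STRIPE_WIDTH;		/* 512 (Gx) or 8192 (Pro) */
		load_count = 4;				/* one load per controller */
	}
	/* then back up (load_count - 2) * step_size bytes from the end of
	 * the buffer and re-issue loads from there */
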
*/ if (hfh) { step_size = L2_CACHE_BYTES; +#ifdef __tilegx__ + load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES; +#else load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) * (1 << CHIP_LOG_NUM_MSHIMS()); +#endif } else { step_size = STRIPE_WIDTH; load_count = (1 << CHIP_LOG_NUM_MSHIMS()); @@ -109,7 +133,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) /* Figure out how far back we need to go. */ base = p - (step_size * (load_count - 2)); - if ((long)base < (long)buffer) + if ((unsigned long)base < (unsigned long)buffer) base = buffer; /* diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c index 4763b3aff1cc..37440caa7370 100644 --- a/arch/tile/lib/memcpy_user_64.c +++ b/arch/tile/lib/memcpy_user_64.c @@ -14,7 +14,13 @@ * Do memcpy(), but trap and return "n" when a load or store faults. * * Note: this idiom only works when memcpy() compiles to a leaf function. - * If "sp" is updated during memcpy, the "jrp lr" will be incorrect. + * Here leaf function not only means it does not have calls, but also + * requires no stack operations (sp, stack frame pointer) and no + * use of callee-saved registers, else "jrp lr" will be incorrect since + * unwinding stack frame is bypassed. Since memcpy() is not complex so + * these conditions are satisfied here, but we need to be careful when + * modifying this file. This is not a clean solution but is the best + * one so far. * * Also note that we are capturing "n" from the containing scope here. */ diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h index c10109809132..6ac37509faca 100644 --- a/arch/tile/lib/spinlock_common.h +++ b/arch/tile/lib/spinlock_common.h @@ -60,5 +60,5 @@ static void delay_backoff(int iterations) loops += __insn_crc32_32(stack_pointer, get_cycles_low()) & (loops - 1); - relax(1 << exponent); + relax(loops); } diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index cba30e9547b4..22e58f51ed23 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -130,7 +130,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) } /* - * Handle a fault on the vmalloc or module mapping area + * Handle a fault on the vmalloc area. */ static inline int vmalloc_fault(pgd_t *pgd, unsigned long address) { @@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void) * interrupt or a critical region, and must do as little as possible. * Similarly, we can't use atomic ops here, since we may be handling a * fault caused by an atomic op access. + * + * If we find a migrating PTE while we're in an NMI context, and we're + * at a PC that has a registered exception handler, we don't wait, + * since this thread may (e.g.) have been interrupted while migrating + * its own stack, which would then cause us to self-deadlock. */ static int handle_migrating_pte(pgd_t *pgd, int fault_num, - unsigned long address, + unsigned long address, unsigned long pc, int is_kernel_mode, int write) { pud_t *pud; @@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num, pte_offset_kernel(pmd, address); pteval = *pte; if (pte_migrating(pteval)) { + if (in_nmi() && search_exception_tables(pc)) + return 0; wait_for_migration(pte); return 1; } @@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs, * rather than trying to patch up the existing PTE. 
*/ pgd = get_current_pgd(); - if (handle_migrating_pte(pgd, fault_num, address, + if (handle_migrating_pte(pgd, fault_num, address, regs->pc, is_kernel_mode, write)) return 1; @@ -335,9 +342,12 @@ static int handle_page_fault(struct pt_regs *regs, /* * If we're trying to touch user-space addresses, we must * be either at PL0, or else with interrupts enabled in the - * kernel, so either way we can re-enable interrupts here. + * kernel, so either way we can re-enable interrupts here + * unless we are doing atomic access to user space with + * interrupts disabled. */ - local_irq_enable(); + if (!(regs->flags & PT_FLAGS_DISABLE_IRQ)) + local_irq_enable(); mm = tsk->mm; @@ -665,7 +675,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, */ if (fault_num == INT_DTLB_ACCESS) write = 1; - if (handle_migrating_pte(pgd, fault_num, address, 1, write)) + if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write)) return state; /* Return zero so that we continue on with normal fault handling. */ diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 1cc6ae477c98..499f73770b05 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -394,6 +394,7 @@ int page_home(struct page *page) return pte_to_home(*virt_to_pte(NULL, kva)); } } +EXPORT_SYMBOL(page_home); void homecache_change_page_home(struct page *page, int order, int home) { diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 830c4908ea76..6a9d20ddc34f 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -254,11 +254,6 @@ static pgprot_t __init init_pgprot(ulong address) return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE); } - /* As a performance optimization, keep the boot init stack here. */ - if (address >= (ulong)&init_thread_union && - address < (ulong)&init_thread_union + THREAD_SIZE) - return construct_pgprot(PAGE_KERNEL, smp_processor_id()); - #ifndef __tilegx__ #if !ATOMIC_LOCKS_FOUND_VIA_TABLE() /* Force the atomic_locks[] array page to be hash-for-home. */ @@ -557,6 +552,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) address = MEM_SV_INTRPT; pmd = get_pmd(pgtables, address); + pfn = 0; /* code starts at PA 0 */ if (ktext_small) { /* Allocate an L2 PTE for the kernel text */ int cpu = 0; @@ -579,10 +575,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) } BUG_ON(address != (unsigned long)_stext); - pfn = 0; /* code starts at PA 0 */ - pte = alloc_pte(); - for (pte_ofs = 0; address < (unsigned long)_einittext; - pfn++, pte_ofs++, address += PAGE_SIZE) { + pte = NULL; + for (; address < (unsigned long)_einittext; + pfn++, address += PAGE_SIZE) { + pte_ofs = pte_index(address); + if (pte_ofs == 0) { + if (pte) + assign_pte(pmd++, pte); + pte = alloc_pte(); + } if (!ktext_local) { prot = set_remote_cache_cpu(prot, cpu); cpu = cpumask_next(cpu, &ktext_mask); @@ -591,7 +592,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) } pte[pte_ofs] = pfn_pte(pfn, prot); } - assign_pte(pmd, pte); + if (pte) + assign_pte(pmd, pte); } else { pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); pteval = pte_mkhuge(pteval); @@ -614,7 +616,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) else pteval = hv_pte_set_mode(pteval, HV_PTE_MODE_CACHE_NO_L3); - *(pte_t *)pmd = pteval; + for (; address < (unsigned long)_einittext; + pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE) + *(pte_t *)(pmd++) = pfn_pte(pfn, pteval); } /* Set swapper_pgprot here so it is flushed to memory right away. 
*/ diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 87303693a072..2410aa899b3e 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr) if (!pmd_huge_page(*pmd)) return; - /* - * Grab the pgd_lock, since we may need it to walk the pgd_list, - * and since we need some kind of lock here to avoid races. - */ - spin_lock_irqsave(&pgd_lock, flags); + spin_lock_irqsave(&init_mm.page_table_lock, flags); if (!pmd_huge_page(*pmd)) { /* Lost the race to convert the huge page. */ - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock_irqrestore(&init_mm.page_table_lock, flags); return; } @@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr) #ifdef __PAGETABLE_PMD_FOLDED /* Walk every pgd on the system and update the pmd there. */ + spin_lock(&pgd_lock); list_for_each(pos, &pgd_list) { pmd_t *copy_pmd; pgd = list_to_pgd(pos) + pgd_index(addr); @@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr) copy_pmd = pmd_offset(pud, addr); __set_pmd(copy_pmd, *pmd); } + spin_unlock(&pgd_lock); #endif /* Tell every cpu to notice the change. */ @@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr) cpu_possible_mask, NULL, 0); /* Hold the lock until the TLB flush is finished to avoid races. */ - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock_irqrestore(&init_mm.page_table_lock, flags); } /* @@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr) * against pageattr.c; it is the unique case in which a valid change * of kernel pagetables can't be lazily synchronized by vmalloc faults. * vmalloc faults work because attached pagetables are never freed. - * The locking scheme was chosen on the basis of manfred's - * recommendations and having no core impact whatsoever. - * -- wli + * + * The lock is always taken with interrupts disabled, unlike on x86 + * and other platforms, because we need to take the lock in + * shatter_huge_page(), which may be called from an interrupt context. + * We are not at risk from the tlbflush IPI deadlock that was seen on + * x86, since we use the flush_remote() API to have the hypervisor do + * the TLB flushes regardless of irq disabling. */ DEFINE_SPINLOCK(pgd_lock); LIST_HEAD(pgd_list); @@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte) void set_pte(pte_t *ptep, pte_t pte) { - struct page *page = pfn_to_page(pte_pfn(pte)); - - /* Update the home of a PTE if necessary */ - pte = pte_set_home(pte, page_home(page)); + if (pte_present(pte) && + (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) { + /* The PTE actually references physical memory. */ + unsigned long pfn = pte_pfn(pte); + if (pfn_valid(pfn)) { + /* Update the home of the PTE from the struct page. */ + pte = pte_set_home(pte, page_home(pfn_to_page(pfn))); + } else if (hv_pte_get_mode(pte) == 0) { + /* remap_pfn_range(), etc, must supply PTE mode. 
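Taken together, the shatter_huge_page() hunks above settle on this lock nesting (skeleton only; the actual shattering and IPI flush elided):

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* lost the race to another shatterer */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}
	/* ... replace the huge mapping with an L2 page table ... */
#ifdef __PAGETABLE_PMD_FOLDED
	spin_lock(&pgd_lock);		/* now only guards the pgd_list walk */
	/* ... copy the new pmd into every pgd on pgd_list ... */
	spin_unlock(&pgd_lock);
#endif
	/* the remote TLB flush completes before the outer lock is dropped */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
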
*/ + panic("set_pte(): out-of-range PFN and mode 0\n"); + } + } __set_pte(ptep, pte); } diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h index dc36b222100b..6673508f3426 100644 --- a/arch/um/drivers/cow.h +++ b/arch/um/drivers/cow.h @@ -3,41 +3,6 @@ #include <asm/types.h> -#if defined(__KERNEL__) - -# include <asm/byteorder.h> - -# if defined(__BIG_ENDIAN) -# define ntohll(x) (x) -# define htonll(x) (x) -# elif defined(__LITTLE_ENDIAN) -# define ntohll(x) be64_to_cpu(x) -# define htonll(x) cpu_to_be64(x) -# else -# error "Could not determine byte order" -# endif - -#else -/* For the definition of ntohl, htonl and __BYTE_ORDER */ -#include <endian.h> -#include <netinet/in.h> -#if defined(__BYTE_ORDER) - -# if __BYTE_ORDER == __BIG_ENDIAN -# define ntohll(x) (x) -# define htonll(x) (x) -# elif __BYTE_ORDER == __LITTLE_ENDIAN -# define ntohll(x) bswap_64(x) -# define htonll(x) bswap_64(x) -# else -# error "Could not determine byte order: __BYTE_ORDER uncorrectly defined" -# endif - -#else /* ! defined(__BYTE_ORDER) */ -# error "Could not determine byte order: __BYTE_ORDER not defined" -#endif -#endif /* ! defined(__KERNEL__) */ - extern int init_cow_file(int fd, char *cow_file, char *backing_file, int sectorsize, int alignment, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out); diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c index 9cbb426c0b91..0ee9cc6cc4c7 100644 --- a/arch/um/drivers/cow_user.c +++ b/arch/um/drivers/cow_user.c @@ -8,11 +8,10 @@ * that. */ #include <unistd.h> -#include <byteswap.h> #include <errno.h> #include <string.h> #include <arpa/inet.h> -#include <asm/types.h> +#include <endian.h> #include "cow.h" #include "cow_sys.h" @@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file, "header\n"); goto out; } - header->magic = htonl(COW_MAGIC); - header->version = htonl(COW_VERSION); + header->magic = htobe32(COW_MAGIC); + header->version = htobe32(COW_VERSION); err = -EINVAL; if (strlen(backing_file) > sizeof(header->backing_file) - 1) { @@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file, goto out_free; } - header->mtime = htonl(modtime); - header->size = htonll(*size); - header->sectorsize = htonl(sectorsize); - header->alignment = htonl(alignment); + header->mtime = htobe32(modtime); + header->size = htobe64(*size); + header->sectorsize = htobe32(sectorsize); + header->alignment = htobe32(alignment); header->cow_format = COW_BITMAP; err = cow_write_file(fd, header, sizeof(*header)); @@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, magic = header->v1.magic; if (magic == COW_MAGIC) version = header->v1.version; - else if (magic == ntohl(COW_MAGIC)) - version = ntohl(header->v1.version); + else if (magic == be32toh(COW_MAGIC)) + version = be32toh(header->v1.version); /* No error printed because the non-COW case comes through here */ else goto out; @@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, "header\n"); goto out; } - *mtime_out = ntohl(header->v2.mtime); - *size_out = ntohll(header->v2.size); - *sectorsize_out = ntohl(header->v2.sectorsize); + *mtime_out = be32toh(header->v2.mtime); + *size_out = be64toh(header->v2.size); + *sectorsize_out = be32toh(header->v2.sectorsize); *bitmap_offset_out = sizeof(header->v2); *align_out = *sectorsize_out; file = header->v2.backing_file; @@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, 
"header\n"); goto out; } - *mtime_out = ntohl(header->v3.mtime); - *size_out = ntohll(header->v3.size); - *sectorsize_out = ntohl(header->v3.sectorsize); - *align_out = ntohl(header->v3.alignment); + *mtime_out = be32toh(header->v3.mtime); + *size_out = be64toh(header->v3.size); + *sectorsize_out = be32toh(header->v3.sectorsize); + *align_out = be32toh(header->v3.alignment); if (*align_out == 0) { cow_printf("read_cow_header - invalid COW header, " "align == 0\n"); @@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, * this was used until Dec2005 - 64bits are needed to represent * 2038+. I.e. we can safely do this truncating cast. * - * Additionally, we must use ntohl() instead of ntohll(), since + * Additionally, we must use be32toh() instead of be64toh(), since * the program used to use the former (tested - I got mtime * mismatch "0 vs whatever"). * * Ever heard about bug-to-bug-compatibility ? ;-) */ - *mtime_out = (time32_t) ntohl(header->v3_b.mtime); + *mtime_out = (time32_t) be32toh(header->v3_b.mtime); - *size_out = ntohll(header->v3_b.size); - *sectorsize_out = ntohl(header->v3_b.sectorsize); - *align_out = ntohl(header->v3_b.alignment); + *size_out = be64toh(header->v3_b.size); + *sectorsize_out = be32toh(header->v3_b.sectorsize); + *align_out = be32toh(header->v3_b.alignment); if (*align_out == 0) { cow_printf("read_cow_header - invalid COW header, " "align == 0\n"); diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index e672bd6d43e3..43b39d61b538 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -22,6 +22,7 @@ #include <linux/workqueue.h> #include <linux/mutex.h> #include <asm/uaccess.h> +#include <asm/switch_to.h> #include "init.h" #include "irq_kern.h" diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 8419f5cf2ac7..fff24352255d 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -1,3 +1,4 @@ generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h -generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h +generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h +generic-y += switch_to.h diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 492bc4c1b62b..65a1c3d690ea 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -3,9 +3,10 @@ # Licensed under the GPL # -CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \ - -DELF_ARCH=$(LDS_ELF_ARCH) \ - -DELF_FORMAT=$(LDS_ELF_FORMAT) +CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \ + -DELF_ARCH=$(LDS_ELF_ARCH) \ + -DELF_FORMAT=$(LDS_ELF_FORMAT) \ + $(LDS_EXTRA) extra-y := vmlinux.lds clean-files := diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index f386d04a84a5..2b73dedb44ca 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task) extern void arch_switch_to(struct task_struct *to); -void *_switch_to(void *prev, void *next, void *last) +void *__switch_to(struct task_struct *from, struct task_struct *to) { - struct task_struct *from = prev; - struct task_struct *to = next; - to->thread.prev_sched = from; set_current(to); @@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last) } while (current->thread.saved_task); return current->thread.prev_sched; - } void interrupt_end(void) diff --git a/arch/um/kernel/skas/mmu.c 
b/arch/um/kernel/skas/mmu.c index 4947b319f53a..0a49ef0c2bf4 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) void uml_setup_stubs(struct mm_struct *mm) { - struct page **pages; int err, ret; if (!skas_needs_stub) diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 9258e592f414..366460a81796 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -82,20 +82,26 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); + return dma_ops->alloc(dev, size, dma_handle, flag, attrs); } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); } #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c index bfa9fbb2bbb1..16c08b2143a7 100644 --- a/arch/unicore32/mm/dma-swiotlb.c +++ b/arch/unicore32/mm/dma-swiotlb.c @@ -17,9 +17,23 @@ #include <asm/dma.h> +static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) +{ + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); +} + +static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + struct dma_attrs *attrs) +{ + swiotlb_free_coherent(dev, size, vaddr, dma_addr); +} + struct dma_map_ops swiotlb_dma_map_ops = { - .alloc_coherent = swiotlb_alloc_coherent, - .free_coherent = swiotlb_free_coherent, + .alloc = unicore_swiotlb_alloc_coherent, + .free = unicore_swiotlb_free_coherent, .map_sg = swiotlb_map_sg_attrs, .unmap_sg = swiotlb_unmap_sg_attrs, .dma_supported = swiotlb_dma_supported, diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 968dbe24a255..41a7237606a3 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -129,6 +129,7 @@ KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # prevent gcc from generating any FP code by mistake KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) +KBUILD_CFLAGS += $(call cc-option,-mno-avx,) KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index 4be406abeefd..36b62bc52638 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32) export LDFLAGS +LDS_EXTRA := -Ui386 +export LDS_EXTRA + # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. 
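From a driver's point of view the dma_alloc_coherent()/dma_free_coherent() conversion above is source-compatible; a minimal usage sketch (the device, size and flags are arbitrary):

	dma_addr_t dma;
	void *cpu;

	/* expands to dma_alloc_attrs(dev, PAGE_SIZE, &dma, GFP_KERNEL, NULL) */
	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... use the buffer ... */

	/* expands to dma_free_attrs(dev, PAGE_SIZE, cpu, dma, NULL) */
	dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
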
include $(srctree)/arch/x86/Makefile_32.cpu diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index ed3065fd6314..4b4331d71935 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -59,7 +59,8 @@ extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag); + dma_addr_t *dma_addr, gfp_t flag, + struct dma_attrs *attrs); static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { @@ -111,9 +112,11 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) return gfp; } +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + static inline void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp) +dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); void *memory; @@ -129,18 +132,21 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, if (!is_device_dma_capable(dev)) return NULL; - if (!ops->alloc_coherent) + if (!ops->alloc) return NULL; - memory = ops->alloc_coherent(dev, size, dma_handle, - dma_alloc_coherent_gfp_flags(dev, gfp)); + memory = ops->alloc(dev, size, dma_handle, + dma_alloc_coherent_gfp_flags(dev, gfp), attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, memory); return memory; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t bus) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t bus, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -150,8 +156,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return; debug_dma_free_coherent(dev, size, vaddr, bus); - if (ops->free_coherent) - ops->free_coherent(dev, size, vaddr, bus); + if (ops->free) + ops->free(dev, size, vaddr, bus, attrs); } #endif diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7284c9a6a0b5..4fa7dcceb6c0 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -974,16 +974,6 @@ extern bool cpu_has_amd_erratum(const int *); #define cpu_has_amd_erratum(x) (false) #endif /* CONFIG_CPU_SUP_AMD */ -#ifdef CONFIG_X86_32 -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -#endif - -void disable_hlt(void); -void enable_hlt(void); - void cpu_idle_wait(void); extern unsigned long arch_align_stack(unsigned long sp); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8be5f54d9360..e0544597cfe7 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; }; extern unsigned long copy_from_user_nmi(void *to, const void __user *from, unsigned long n); +extern __must_check long +strncpy_from_user(char *dst, const char __user *src, long count); /* * movsl can be slow when source and dest are not both 8-byte aligned diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 566e803cc602..8084bc73b18c 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -213,11 +213,6 @@ static inline 
unsigned long __must_check copy_from_user(void *to, return n; } -long __must_check strncpy_from_user(char *dst, const char __user *src, - long count); -long __must_check __strncpy_from_user(char *dst, - const char __user *src, long count); - /** * strlen_user: - Get the size of a string in user space. * @str: The string to measure. diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 1c66d30971ad..fcd4b6f3ef02 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) } } -__must_check long -strncpy_from_user(char *dst, const char __user *src, long count); -__must_check long -__strncpy_from_user(char *dst, const char __user *src, long count); __must_check long strnlen_user(const char __user *str, long n); __must_check long __strnlen_user(const char __user *str, long n); __must_check long strlen_user(const char __user *str); diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6fe6767b7124 --- /dev/null +++ b/arch/x86/include/asm/word-at-a-time.h @@ -0,0 +1,46 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +/* + * This is largely generic for little-endian machines, but the + * optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. 
+ */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) + +/* Return the high bit set in the first byte that is a zero */ +static inline unsigned long has_zero(unsigned long a) +{ + return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index b1e7c7f7a0af..e66311200cbd 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -477,7 +477,7 @@ error: /* allocate and map a coherent mapping */ static void * gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, - gfp_t flag) + gfp_t flag, struct dma_attrs *attrs) { dma_addr_t paddr; unsigned long align_mask; @@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, } __free_pages(page, get_order(size)); } else - return dma_generic_alloc_coherent(dev, size, dma_addr, flag); + return dma_generic_alloc_coherent(dev, size, dma_addr, flag, + attrs); return NULL; } @@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, /* free a coherent mapping */ static void gart_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr) + dma_addr_t dma_addr, struct dma_attrs *attrs) { gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); free_pages((unsigned long)vaddr, get_order(size)); @@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = { .unmap_sg = gart_unmap_sg, .map_page = gart_map_page, .unmap_page = gart_unmap_page, - .alloc_coherent = gart_alloc_coherent, - .free_coherent = gart_free_coherent, + .alloc = gart_alloc_coherent, + .free = gart_free_coherent, .mapping_error = gart_mapping_error, }; diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ef484d9d0a25..a2dfacfd7103 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -1271,6 +1271,17 @@ done: return num ? -EINVAL : 0; } +PMU_FORMAT_ATTR(cccr, "config:0-31" ); +PMU_FORMAT_ATTR(escr, "config:32-62"); +PMU_FORMAT_ATTR(ht, "config:63" ); + +static struct attribute *intel_p4_formats_attr[] = { + &format_attr_cccr.attr, + &format_attr_escr.attr, + &format_attr_ht.attr, + NULL, +}; + static __initconst const struct x86_pmu p4_pmu = { .name = "Netburst P4/Xeon", .handle_irq = p4_pmu_handle_irq, @@ -1305,6 +1316,8 @@ static __initconst const struct x86_pmu p4_pmu = { * the former idea is taken from OProfile code */ .perfctr_second_write = 1, + + .format_attrs = intel_p4_formats_attr, }; __init int p4_pmu_init(void) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 7943e0c21bde..3dafc6003b7c 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -282,8 +282,13 @@ void fixup_irqs(void) else if (!(warned++)) set_affinity = 0; + /* + * We unmask if the irq was not marked masked by the + * core code. That respects the lazy irq disable + * behaviour. 
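The word-at-a-time helpers added in <asm/word-at-a-time.h> above are easiest to follow on a concrete little-endian word (values chosen purely for illustration):

	/* user bytes 'a','b','\0','c','d','e','f','g', loaded as one word */
	unsigned long c = 0x6766656463006261ul;

	/* has_zero() leaves the 0x80 bit set in the byte that held the NUL */
	unsigned long zero = has_zero(c);	/* == 0x0000000000800000 */

	/* once the bits below that flag are widened into a bytemask
	 * (0xffff here: two non-NUL bytes precede the NUL), the multiply
	 * and shift reduce it to a byte index, i.e. the string length */
	long n = count_masked_bytes(0xfffful);	/* == 2 */
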
+ */ if (!irqd_can_move_in_process_context(data) && - !irqd_irq_disabled(data) && chip->irq_unmask) + !irqd_irq_masked(data) && chip->irq_unmask) chip->irq_unmask(data); raw_spin_unlock(&desc->lock); diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index 90fcf62854bb..1d5d31ea686b 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c @@ -68,16 +68,9 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf, return count; } -static int setup_data_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - - return 0; -} - static const struct file_operations fops_setup_data = { .read = setup_data_read, - .open = setup_data_open, + .open = simple_open, .llseek = default_llseek, }; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index db6720edfdd0..8bfb6146f753 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -43,6 +43,8 @@ #include <linux/smp.h> #include <linux/nmi.h> #include <linux/hw_breakpoint.h> +#include <linux/uaccess.h> +#include <linux/memory.h> #include <asm/debugreg.h> #include <asm/apicdef.h> @@ -741,6 +743,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->ip = ip; } +int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) +{ + int err; + char opc[BREAK_INSTR_SIZE]; + + bpt->type = BP_BREAKPOINT; + err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + BREAK_INSTR_SIZE); + if (err) + return err; + err = probe_kernel_write((char *)bpt->bpt_addr, + arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); +#ifdef CONFIG_DEBUG_RODATA + if (!err) + return err; + /* + * It is safe to call text_poke() because normal kernel execution + * is stopped on all cores, so long as the text_mutex is not locked. + */ + if (mutex_is_locked(&text_mutex)) + return -EBUSY; + text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, + BREAK_INSTR_SIZE); + err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + if (err) + return err; + if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) + return -EINVAL; + bpt->type = BP_POKE_BREAKPOINT; +#endif /* CONFIG_DEBUG_RODATA */ + return err; +} + +int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) +{ +#ifdef CONFIG_DEBUG_RODATA + int err; + char opc[BREAK_INSTR_SIZE]; + + if (bpt->type != BP_POKE_BREAKPOINT) + goto knl_write; + /* + * It is safe to call text_poke() because normal kernel execution + * is stopped on all cores, so long as the text_mutex is not locked. 
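The kdebugfs hunk is a common cleanup: an open method that only stashed inode->i_private can be replaced by simple_open(), which does exactly that. A generic sketch (the fops name and read method are placeholders):

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.read	= example_read,		/* placeholder: consumes file->private_data */
	.open	= simple_open,		/* sets file->private_data = inode->i_private */
	.llseek	= default_llseek,
};
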
+ */ + if (mutex_is_locked(&text_mutex)) + goto knl_write; + text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); + err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) + goto knl_write; + return err; +knl_write: +#endif /* CONFIG_DEBUG_RODATA */ + return probe_kernel_write((char *)bpt->bpt_addr, + (char *)bpt->saved_instr, BREAK_INSTR_SIZE); +} + struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 694d801bf606..b8ba6e4a27e4 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -38,6 +38,7 @@ #include <asm/traps.h> #include <asm/desc.h> #include <asm/tlbflush.h> +#include <asm/idle.h> static int kvmapf = 1; @@ -253,7 +254,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) kvm_async_pf_task_wait((u32)read_cr2()); break; case KVM_PV_REASON_PAGE_READY: + rcu_irq_enter(); + exit_idle(); kvm_async_pf_task_wake((u32)read_cr2()); + rcu_irq_exit(); break; } } diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 6ac5782f4d6b..d0b2fb9ccbb1 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -430,7 +430,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, } static void* calgary_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) { void *ret = NULL; dma_addr_t mapping; @@ -463,7 +463,8 @@ error: } static void calgary_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { unsigned int npages; struct iommu_table *tbl = find_iommu_table(dev); @@ -476,8 +477,8 @@ static void calgary_free_coherent(struct device *dev, size_t size, } static struct dma_map_ops calgary_dma_ops = { - .alloc_coherent = calgary_alloc_coherent, - .free_coherent = calgary_free_coherent, + .alloc = calgary_alloc_coherent, + .free = calgary_free_coherent, .map_sg = calgary_map_sg, .unmap_sg = calgary_unmap_sg, .map_page = calgary_map_page, diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 28e5e06fcba4..3003250ac51d 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void) } } void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag) + dma_addr_t *dma_addr, gfp_t flag, + struct dma_attrs *attrs) { unsigned long dma_mask; struct page *page; diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 3af4af810c07..f96050685b46 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, } static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr) + dma_addr_t dma_addr, struct dma_attrs *attrs) { free_pages((unsigned long)vaddr, get_order(size)); } @@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev, } struct dma_map_ops nommu_dma_ops = { - .alloc_coherent = dma_generic_alloc_coherent, - .free_coherent = nommu_free_coherent, + .alloc = dma_generic_alloc_coherent, + .free = nommu_free_coherent, .map_sg = nommu_map_sg, .map_page = nommu_map_page, .sync_single_for_device = nommu_sync_single_for_device, diff --git 
a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 8f972cbddef0..6c483ba98b9c 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -15,21 +15,30 @@ int swiotlb __read_mostly; static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) { void *vaddr; - vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); + vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, + attrs); if (vaddr) return vaddr; return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); } +static void x86_swiotlb_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + struct dma_attrs *attrs) +{ + swiotlb_free_coherent(dev, size, vaddr, dma_addr); +} + static struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, - .alloc_coherent = x86_swiotlb_alloc_coherent, - .free_coherent = swiotlb_free_coherent, + .alloc = x86_swiotlb_alloc_coherent, + .free = x86_swiotlb_free_coherent, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index a33afaa5ddb7..1d92a5ab6e8b 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -362,34 +362,10 @@ void (*pm_idle)(void); EXPORT_SYMBOL(pm_idle); #endif -#ifdef CONFIG_X86_32 -/* - * This halt magic was a workaround for ancient floppy DMA - * wreckage. It should be safe to remove. - */ -static int hlt_counter; -void disable_hlt(void) -{ - hlt_counter++; -} -EXPORT_SYMBOL(disable_hlt); - -void enable_hlt(void) -{ - hlt_counter--; -} -EXPORT_SYMBOL(enable_hlt); - -static inline int hlt_use_halt(void) -{ - return (!hlt_counter && boot_cpu_data.hlt_works_ok); -} -#else static inline int hlt_use_halt(void) { return 1; } -#endif #ifndef CONFIG_SMP static inline void play_dead(void) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index a73f0c104813..173df38dbda5 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -369,7 +369,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) case MSR_CORE_PERF_FIXED_CTR_CTRL: if (pmu->fixed_ctr_ctrl == data) return 0; - if (!(data & 0xfffffffffffff444)) { + if (!(data & 0xfffffffffffff444ull)) { reprogram_fixed_counters(pmu, data); return 0; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 280751c84724..ad85adfef843 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3906,7 +3906,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); vmx_set_cr4(&vmx->vcpu, 0); vmx_set_efer(&vmx->vcpu, 0); vmx_fpu_activate(&vmx->vcpu); diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index 97be9cb54483..57252c928f56 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -7,6 +7,8 @@ #include <linux/highmem.h> #include <linux/module.h> +#include <asm/word-at-a-time.h> + /* * best effort, GUP based copy_from_user() that is NMI-safe */ @@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) return len; } EXPORT_SYMBOL_GPL(copy_from_user_nmi); + +static inline unsigned 
long count_bytes(unsigned long mask) +{ + mask = (mask - 1) & ~mask; + mask >>= 7; + return count_masked_bytes(mask); +} + +/* + * Do a strncpy, return length of string without final '\0'. + * 'count' is the user-supplied count (return 'count' if we + * hit it), 'max' is the address space maximum (and we return + * -EFAULT if we hit it). + */ +static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, long max) +{ + long res = 0; + + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + + while (max >= sizeof(unsigned long)) { + unsigned long c; + + /* Fall back to byte-at-a-time if we get a page fault */ + if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) + break; + /* This can write a few bytes past the NUL character, but that's ok */ + *(unsigned long *)(dst+res) = c; + c = has_zero(c); + if (c) + return res + count_bytes(c); + res += sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (max) { + char c; + + if (unlikely(__get_user(c,src+res))) + return -EFAULT; + dst[res] = c; + if (!c) + return res; + res++; + max--; + } + + /* + * Uhhuh. We hit 'max'. But was that the user-specified maximum + * too? If so, that's ok - we got as much as the user asked for. + */ + if (res >= count) + return count; + + /* + * Nope: we hit the address space limit, and we still had more + * characters the caller would have wanted. That's an EFAULT. + */ + return -EFAULT; +} + +/** + * strncpy_from_user: - Copy a NUL terminated string from userspace. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + unsigned long max_addr, src_addr; + + if (unlikely(count <= 0)) + return 0; + + max_addr = current_thread_info()->addr_limit.seg; + src_addr = (unsigned long)src; + if (likely(src_addr < max_addr)) { + unsigned long max = max_addr - src_addr; + return do_strncpy_from_user(dst, src, count, max); + } + return -EFAULT; +} +EXPORT_SYMBOL(strncpy_from_user); diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index d9b094ca7aaa..ef2a6a5d78e3 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -33,93 +33,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) /* - * Copy a null terminated string from userspace. 
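Callers of the rewritten strncpy_from_user() rely on the return contract spelled out in the kernel-doc above; a minimal calling sketch (user_ptr and the truncation policy are the caller's own):

	char buf[32];
	long len;

	len = strncpy_from_user(buf, user_ptr, sizeof(buf));
	if (len < 0)
		return len;			/* -EFAULT: the copy faulted */
	if (len == sizeof(buf))
		return -ENAMETOOLONG;		/* hit 'count' before the NUL; buf is
						   not NUL-terminated in this case */
	/* otherwise buf holds a NUL-terminated string of length len */
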
- */ - -#define __do_strncpy_from_user(dst, src, count, res) \ -do { \ - int __d0, __d1, __d2; \ - might_fault(); \ - __asm__ __volatile__( \ - " testl %1,%1\n" \ - " jz 2f\n" \ - "0: lodsb\n" \ - " stosb\n" \ - " testb %%al,%%al\n" \ - " jz 1f\n" \ - " decl %1\n" \ - " jnz 0b\n" \ - "1: subl %1,%0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %5,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(0b,3b) \ - : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ - "=&D" (__d2) \ - : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ - : "memory"); \ -} while (0) - -/** - * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @src: Source address, in user space. - * @count: Maximum number of bytes to copy, including the trailing NUL. - * - * Copies a NUL-terminated string from userspace to kernel space. - * Caller must check the specified block with access_ok() before calling - * this function. - * - * On success, returns the length of the string (not including the trailing - * NUL). - * - * If access to userspace fails, returns -EFAULT (some data may have been - * copied). - * - * If @count is smaller than the length of the string, copies @count bytes - * and returns @count. - */ -long -__strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res; - __do_strncpy_from_user(dst, src, count, res); - return res; -} -EXPORT_SYMBOL(__strncpy_from_user); - -/** - * strncpy_from_user: - Copy a NUL terminated string from userspace. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @src: Source address, in user space. - * @count: Maximum number of bytes to copy, including the trailing NUL. - * - * Copies a NUL-terminated string from userspace to kernel space. - * - * On success, returns the length of the string (not including the trailing - * NUL). - * - * If access to userspace fails, returns -EFAULT (some data may have been - * copied). - * - * If @count is smaller than the length of the string, copies @count bytes - * and returns @count. - */ -long -strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res = -EFAULT; - if (access_ok(VERIFY_READ, src, 1)) - __do_strncpy_from_user(dst, src, count, res); - return res; -} -EXPORT_SYMBOL(strncpy_from_user); - -/* * Zero Userspace */ diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index b7c2849ffb66..0d0326f388c0 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -9,55 +9,6 @@ #include <asm/uaccess.h> /* - * Copy a null terminated string from userspace. 
- */ - -#define __do_strncpy_from_user(dst,src,count,res) \ -do { \ - long __d0, __d1, __d2; \ - might_fault(); \ - __asm__ __volatile__( \ - " testq %1,%1\n" \ - " jz 2f\n" \ - "0: lodsb\n" \ - " stosb\n" \ - " testb %%al,%%al\n" \ - " jz 1f\n" \ - " decq %1\n" \ - " jnz 0b\n" \ - "1: subq %1,%0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movq %5,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(0b,3b) \ - : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ - "=&D" (__d2) \ - : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ - : "memory"); \ -} while (0) - -long -__strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res; - __do_strncpy_from_user(dst, src, count, res); - return res; -} -EXPORT_SYMBOL(__strncpy_from_user); - -long -strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res = -EFAULT; - if (access_ok(VERIFY_READ, src, 1)) - return __strncpy_from_user(dst, src, count); - return res; -} -EXPORT_SYMBOL(strncpy_from_user); - -/* * Zero Userspace */ diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 66870223f8c5..877b9a1b2152 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S @@ -18,17 +18,17 @@ * r9d : hlen = skb->len - skb->data_len */ #define SKBDATA %r8 - -sk_load_word_ind: - .globl sk_load_word_ind - - add %ebx,%esi /* offset += X */ -# test %esi,%esi /* if (offset < 0) goto bpf_error; */ - js bpf_error +#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */ sk_load_word: .globl sk_load_word + test %esi,%esi + js bpf_slow_path_word_neg + +sk_load_word_positive_offset: + .globl sk_load_word_positive_offset + mov %r9d,%eax # hlen sub %esi,%eax # hlen - offset cmp $3,%eax @@ -37,16 +37,15 @@ sk_load_word: bswap %eax /* ntohl() */ ret - -sk_load_half_ind: - .globl sk_load_half_ind - - add %ebx,%esi /* offset += X */ - js bpf_error - sk_load_half: .globl sk_load_half + test %esi,%esi + js bpf_slow_path_half_neg + +sk_load_half_positive_offset: + .globl sk_load_half_positive_offset + mov %r9d,%eax sub %esi,%eax # hlen - offset cmp $1,%eax @@ -55,14 +54,15 @@ sk_load_half: rol $8,%ax # ntohs() ret -sk_load_byte_ind: - .globl sk_load_byte_ind - add %ebx,%esi /* offset += X */ - js bpf_error - sk_load_byte: .globl sk_load_byte + test %esi,%esi + js bpf_slow_path_byte_neg + +sk_load_byte_positive_offset: + .globl sk_load_byte_positive_offset + cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ jle bpf_slow_path_byte movzbl (SKBDATA,%rsi),%eax @@ -73,25 +73,21 @@ sk_load_byte: * * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf) * Must preserve A accumulator (%eax) - * Inputs : %esi is the offset value, already known positive + * Inputs : %esi is the offset value */ -ENTRY(sk_load_byte_msh) - CFI_STARTPROC +sk_load_byte_msh: + .globl sk_load_byte_msh + test %esi,%esi + js bpf_slow_path_byte_msh_neg + +sk_load_byte_msh_positive_offset: + .globl sk_load_byte_msh_positive_offset cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */ jle bpf_slow_path_byte_msh movzbl (SKBDATA,%rsi),%ebx and $15,%bl shl $2,%bl ret - CFI_ENDPROC -ENDPROC(sk_load_byte_msh) - -bpf_error: -# force a return 0 from jit handler - xor %eax,%eax - mov -8(%rbp),%rbx - leaveq - ret /* rsi contains offset and can be scratched */ #define bpf_slow_path_common(LEN) \ @@ -138,3 +134,67 @@ bpf_slow_path_byte_msh: shl $2,%al xchg %eax,%ebx ret + +#define sk_negative_common(SIZE) \ + push %rdi; /* save skb */ \ + push %r9; \ + push SKBDATA; \ +/* rsi already has offset */ \ + mov 
$SIZE,%ecx; /* size */ \ + call bpf_internal_load_pointer_neg_helper; \ + test %rax,%rax; \ + pop SKBDATA; \ + pop %r9; \ + pop %rdi; \ + jz bpf_error + + +bpf_slow_path_word_neg: + cmp SKF_MAX_NEG_OFF, %esi /* test range */ + jl bpf_error /* offset lower -> error */ +sk_load_word_negative_offset: + .globl sk_load_word_negative_offset + sk_negative_common(4) + mov (%rax), %eax + bswap %eax + ret + +bpf_slow_path_half_neg: + cmp SKF_MAX_NEG_OFF, %esi + jl bpf_error +sk_load_half_negative_offset: + .globl sk_load_half_negative_offset + sk_negative_common(2) + mov (%rax),%ax + rol $8,%ax + movzwl %ax,%eax + ret + +bpf_slow_path_byte_neg: + cmp SKF_MAX_NEG_OFF, %esi + jl bpf_error +sk_load_byte_negative_offset: + .globl sk_load_byte_negative_offset + sk_negative_common(1) + movzbl (%rax), %eax + ret + +bpf_slow_path_byte_msh_neg: + cmp SKF_MAX_NEG_OFF, %esi + jl bpf_error +sk_load_byte_msh_negative_offset: + .globl sk_load_byte_msh_negative_offset + xchg %eax,%ebx /* dont lose A , X is about to be scratched */ + sk_negative_common(1) + movzbl (%rax),%eax + and $15,%al + shl $2,%al + xchg %eax,%ebx + ret + +bpf_error: +# force a return 0 from jit handler + xor %eax,%eax + mov -8(%rbp),%rbx + leaveq + ret diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5671752f8d9c..0597f95b6da6 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -30,7 +30,10 @@ int bpf_jit_enable __read_mostly; * assembly code in arch/x86/net/bpf_jit.S */ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; -extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[]; +extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; +extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[]; +extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; +extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[]; static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -117,6 +120,8 @@ static inline void bpf_flush_icache(void *start, void *end) set_fs(old_fs); } +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) void bpf_jit_compile(struct sk_filter *fp) { @@ -289,7 +294,7 @@ void bpf_jit_compile(struct sk_filter *fp) EMIT2(0x24, K & 0xFF); /* and imm8,%al */ } else if (K >= 0xFFFF0000) { EMIT2(0x66, 0x25); /* and imm16,%ax */ - EMIT2(K, 2); + EMIT(K, 2); } else { EMIT1_off32(0x25, K); /* and imm32,%eax */ } @@ -473,44 +478,46 @@ void bpf_jit_compile(struct sk_filter *fp) #endif break; case BPF_S_LD_W_ABS: - func = sk_load_word; + func = CHOOSE_LOAD_FUNC(K, sk_load_word); common_load: seen |= SEEN_DATAREF; - if ((int)K < 0) { - /* Abort the JIT because __load_pointer() is needed. */ - goto out; - } t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call */ break; case BPF_S_LD_H_ABS: - func = sk_load_half; + func = CHOOSE_LOAD_FUNC(K, sk_load_half); goto common_load; case BPF_S_LD_B_ABS: - func = sk_load_byte; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); goto common_load; case BPF_S_LDX_B_MSH: - if ((int)K < 0) { - /* Abort the JIT because __load_pointer() is needed. 
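Illustrative only, not part of the patch: a userspace C sketch of the compile-time dispatch CHOOSE_LOAD_FUNC() performs, assuming SKF_LL_OFF is -0x200000 as in linux/filter.h (the same value SKF_MAX_NEG_OFF hard-codes in bpf_jit.S); the function and parameter names are invented for the example.

#include <stdint.h>

#define SKF_LL_OFF (-0x200000)

typedef const void *helper_t;

/*
 * Hypothetical stand-ins for one helper family, e.g. sk_load_word and the
 * sk_load_word_{positive,negative}_offset entry points introduced above.
 */
static helper_t choose_load_func(int32_t k, helper_t generic,
				 helper_t positive, helper_t negative)
{
	if (k >= 0)
		return positive;	/* non-negative constant offset: fast-path entry */
	if (k >= SKF_LL_OFF)
		return negative;	/* slow path via bpf_internal_load_pointer_neg_helper */
	return generic;			/* below SKF_LL_OFF: the run-time test ends in bpf_error */
}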
*/ - goto out; - } + func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = sk_load_byte_msh - (image + addrs[i]); + t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ break; case BPF_S_LD_W_IND: - func = sk_load_word_ind; + func = sk_load_word; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ + if (K) { + if (is_imm8(K)) { + EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ + } else { + EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ + EMIT(K, 4); + } + } else { + EMIT2(0x89,0xde); /* mov %ebx,%esi */ + } EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ break; case BPF_S_LD_H_IND: - func = sk_load_half_ind; + func = sk_load_half; goto common_load_ind; case BPF_S_LD_B_IND: - func = sk_load_byte_ind; + func = sk_load_byte; goto common_load_ind; case BPF_S_JMP_JA: t_offset = addrs[i + K] - addrs[i]; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 47936830968c..218cdb16163c 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -225,13 +225,13 @@ static void __restore_processor_state(struct saved_context *ctxt) fix_processor_context(); do_fpu_end(); + x86_platform.restore_sched_clock_state(); mtrr_bp_restore(); } /* Needed by apm.c */ void restore_processor_state(void) { - x86_platform.restore_sched_clock_state(); __restore_processor_state(&saved_context); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h new file mode 100644 index 000000000000..7d01b8c56c00 --- /dev/null +++ b/arch/x86/um/asm/barrier.h @@ -0,0 +1,75 @@ +#ifndef _ASM_UM_BARRIER_H_ +#define _ASM_UM_BARRIER_H_ + +#include <asm/asm.h> +#include <asm/segment.h> +#include <asm/cpufeature.h> +#include <asm/cmpxchg.h> +#include <asm/nops.h> + +#include <linux/kernel.h> +#include <linux/irqflags.h> + +/* + * Force strict CPU ordering. + * And yes, this is required on UP too when we're talking + * to devices. + */ +#ifdef CONFIG_X86_32 + +#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) +#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) +#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) + +#else /* CONFIG_X86_32 */ + +#define mb() asm volatile("mfence" : : : "memory") +#define rmb() asm volatile("lfence" : : : "memory") +#define wmb() asm volatile("sfence" : : : "memory") + +#endif /* CONFIG_X86_32 */ + +#define read_barrier_depends() do { } while (0) + +#ifdef CONFIG_SMP + +#define smp_mb() mb() +#ifdef CONFIG_X86_PPRO_FENCE +#define smp_rmb() rmb() +#else /* CONFIG_X86_PPRO_FENCE */ +#define smp_rmb() barrier() +#endif /* CONFIG_X86_PPRO_FENCE */ + +#ifdef CONFIG_X86_OOSTORE +#define smp_wmb() wmb() +#else /* CONFIG_X86_OOSTORE */ +#define smp_wmb() barrier() +#endif /* CONFIG_X86_OOSTORE */ + +#define smp_read_barrier_depends() read_barrier_depends() +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) + +#else /* CONFIG_SMP */ + +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#define set_mb(var, value) do { var = value; barrier(); } while (0) + +#endif /* CONFIG_SMP */ + +/* + * Stop RDTSC speculation. This is needed when you need to use RDTSC + * (or get_cycles or vread that possibly accesses the TSC) in a defined + * code region. 
+ * + * (Could use an alternative three way for this if there was one.) + */ +static inline void rdtsc_barrier(void) +{ + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); +} + +#endif diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h deleted file mode 100644 index a459fd9b7598..000000000000 --- a/arch/x86/um/asm/system.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _ASM_X86_SYSTEM_H_ -#define _ASM_X86_SYSTEM_H_ - -#include <asm/asm.h> -#include <asm/segment.h> -#include <asm/cpufeature.h> -#include <asm/cmpxchg.h> -#include <asm/nops.h> - -#include <linux/kernel.h> -#include <linux/irqflags.h> - -/* entries in ARCH_DLINFO: */ -#ifdef CONFIG_IA32_EMULATION -# define AT_VECTOR_SIZE_ARCH 2 -#else -# define AT_VECTOR_SIZE_ARCH 1 -#endif - -extern unsigned long arch_align_stack(unsigned long sp); - -void default_idle(void); - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. - */ -#ifdef CONFIG_X86_32 -/* - * Some non-Intel clones support out of order store. wmb() ceases to be a - * nop for these. - */ -#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) -#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) -#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) -#else -#define mb() asm volatile("mfence":::"memory") -#define rmb() asm volatile("lfence":::"memory") -#define wmb() asm volatile("sfence" ::: "memory") -#endif - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. 
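Illustrative only, not part of either file shown here: a hypothetical sketch of the kind of use rdtsc_barrier() (retained in the new barrier.h above) exists for, namely fencing RDTSC so it cannot be speculated ahead of the region being timed.

#include <linux/types.h>

/* Hypothetical wrapper, for illustration only. */
static inline u64 fenced_rdtsc(void)
{
	unsigned int lo, hi;

	rdtsc_barrier();	/* fence so RDTSC cannot execute ahead of preceding code */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((u64)hi << 32) | lo;
}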
- **/ - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#ifdef CONFIG_X86_PPRO_FENCE -# define smp_rmb() rmb() -#else -# define smp_rmb() barrier() -#endif -#ifdef CONFIG_X86_OOSTORE -# define smp_wmb() wmb() -#else -# define smp_wmb() barrier() -#endif -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif - -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. - * - * (Could use an alternative three way for this if there was one.) - */ -static inline void rdtsc_barrier(void) -{ - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); - alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); -} - -extern void *_switch_to(void *prev, void *next, void *last); -#define switch_to(prev, next, last) prev = _switch_to(prev, next, last) - -#endif diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 988828b479ed..b8e279479a6b 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1859,6 +1859,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, #endif /* CONFIG_X86_64 */ static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; +static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss; static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) { @@ -1899,7 +1900,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) * We just don't map the IO APIC - all access is via * hypercalls. Keep the address in the pte for reference. */ - pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); + pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL); break; #endif @@ -2064,6 +2065,7 @@ void __init xen_init_mmu_ops(void) pv_mmu_ops = xen_mmu_ops; memset(dummy_mapping, 0xff, PAGE_SIZE); + memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE); } /* Protected by xen_reservation_lock. */ diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index b480d4207a4c..967633ad98c4 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -12,8 +12,8 @@ int xen_swiotlb __read_mostly; static struct dma_map_ops xen_swiotlb_dma_ops = { .mapping_error = xen_swiotlb_dma_mapping_error, - .alloc_coherent = xen_swiotlb_alloc_coherent, - .free_coherent = xen_swiotlb_free_coherent, + .alloc = xen_swiotlb_alloc_coherent, + .free = xen_swiotlb_free_coherent, .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, .sync_single_for_device = xen_swiotlb_sync_single_for_device, .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 02900e8ce26c..5fac6919b957 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -59,7 +59,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) static void __cpuinit cpu_bringup(void) { - int cpu = smp_processor_id(); + int cpu; cpu_init(); touch_softlockup_watchdog(); |