author | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2008-10-12 17:44:33 +0400
---|---|---
committer | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2008-10-12 17:44:33 +0400
commit | 0d62950125241a6e6db8e8f14271f098ec7a2da4 (patch) |
tree | 8cdd9e17f6a6ff4cb6166ad12a4d3ed1d45b2dc9 |
parent | b3bc2c5562f06ca34b30f61c5714e96490946c81 (diff) |
parent | 5e7184ae0dd49456387e8b1cdebc6b2c92fc6d51 (diff) |
download | linux-0d62950125241a6e6db8e8f14271f098ec7a2da4.tar.xz |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/hskinnemoen/atmel-mci-2.6.28
231 files changed, 3590 insertions, 1861 deletions
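The headline change in this merge is the reworked atmel-mci platform data (see the avr32 hunks below): each MMC slot is now described explicitly with a bus width and optional card-detect/write-protect GPIOs, and at32_add_device_mci() no longer accepts a NULL pointer. The following is an illustrative sketch only, modelled on the atngw100 hunk in this diff rather than copied from any single board file; the init function name is a placeholder and board headers are elided.

```c
/*
 * Sketch of the new per-slot MMC platform data, based on the
 * atngw100 hunk in this diff. Board-specific headers (the GPIO
 * pin macros, <asm/atmel-mci.h>, etc.) are assumed and elided.
 */
static struct mci_platform_data __initdata mci0_data = {
	.slot[0] = {
		.bus_width	= 4,			/* 4-bit data bus */
		.detect_pin	= GPIO_PIN_PC(25),	/* card detect switch */
		.wp_pin		= GPIO_PIN_PE(0),	/* write protect sensor */
	},
	/* slot[1] left zeroed: bus_width == 0 marks the slot as unused */
};

static int __init board_setup_mmc(void)	/* placeholder name */
{
	/* A NULL pdata pointer now makes at32_add_device_mci() bail out */
	at32_add_device_mci(0, &mci0_data);
	return 0;
}
```

Boards with no card-detect or write-protect wiring set those fields to a negative value such as -ENODEV, as the atstk1003 and atstk1004 hunks below do.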
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt index b463ecd0c7ce..c74fec8c2351 100644 --- a/Documentation/DMA-mapping.txt +++ b/Documentation/DMA-mapping.txt @@ -740,7 +740,7 @@ failure can be determined by: dma_addr_t dma_handle; dma_handle = pci_map_single(pdev, addr, size, direction); - if (pci_dma_mapping_error(dma_handle)) { + if (pci_dma_mapping_error(pdev, dma_handle)) { /* * reduce current DMA mapping usage, * delay and try again later or diff --git a/Documentation/HOWTO b/Documentation/HOWTO index c2371c5a98f9..48a3955f05fc 100644 --- a/Documentation/HOWTO +++ b/Documentation/HOWTO @@ -77,7 +77,8 @@ documentation files are also added which explain how to use the feature. When a kernel change causes the interface that the kernel exposes to userspace to change, it is recommended that you send the information or a patch to the manual pages explaining the change to the manual pages -maintainer at mtk.manpages@gmail.com. +maintainer at mtk.manpages@gmail.com, and CC the list +linux-api@vger.kernel.org. Here is a list of files that are in the kernel source tree that are required reading: diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist index da10e0714241..21f0795af20f 100644 --- a/Documentation/SubmitChecklist +++ b/Documentation/SubmitChecklist @@ -67,6 +67,8 @@ kernel patches. 19: All new userspace interfaces are documented in Documentation/ABI/. See Documentation/ABI/README for more information. + Patches that change userspace interfaces should be CCed to + linux-api@vger.kernel.org. 20: Check that it all passes `make headers_check'. diff --git a/Documentation/ioctl/cdrom.txt b/Documentation/ioctl/cdrom.txt index 62d4af44ec4a..59df81c8da2b 100644 --- a/Documentation/ioctl/cdrom.txt +++ b/Documentation/ioctl/cdrom.txt @@ -271,14 +271,14 @@ CDROMCLOSETRAY pendant of CDROMEJECT usage: - ioctl(fd, CDROMEJECT, 0); + ioctl(fd, CDROMCLOSETRAY, 0); inputs: none outputs: none error returns: - ENOSYS cd drive not capable of ejecting + ENOSYS cd drive not capable of closing the tray EBUSY other processes are accessing drive, or door is locked notes: diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 276a7e637822..e1ff0d920a5c 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -351,9 +351,10 @@ kernel. This value defaults to SHMMAX. softlockup_thresh: -This value can be used to lower the softlockup tolerance -threshold. The default threshold is 10s. If a cpu is locked up -for 10s, the kernel complains. Valid values are 1-60s. +This value can be used to lower the softlockup tolerance threshold. The +default threshold is 60 seconds. If a cpu is locked up for 60 seconds, +the kernel complains. Valid values are 1-60 seconds. Setting this +tunable to zero will disable the softlockup detection altogether. ============================================================== diff --git a/Documentation/usb/anchors.txt b/Documentation/usb/anchors.txt index 7304bcf5a306..5e6b64c20d25 100644 --- a/Documentation/usb/anchors.txt +++ b/Documentation/usb/anchors.txt @@ -42,9 +42,21 @@ This function kills all URBs associated with an anchor. The URBs are called in the reverse temporal order they were submitted. This way no data can be reordered. +usb_unlink_anchored_urbs() +-------------------------- + +This function unlinks all URBs associated with an anchor. The URBs +are processed in the reverse temporal order they were submitted. 
+This is similar to usb_kill_anchored_urbs(), but it will not sleep. +Therefore no guarantee is made that the URBs have been unlinked when +the call returns. They may be unlinked later but will be unlinked in +finite time. + usb_wait_anchor_empty_timeout() ------------------------------- This function waits for all URBs associated with an anchor to finish or a timeout, whichever comes first. Its return value will tell you whether the timeout was reached. + + diff --git a/MAINTAINERS b/MAINTAINERS index cad81a24e832..8dae4555f10e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1198,9 +1198,7 @@ M: hpa@zytor.com S: Maintained CPUSETS -P: Paul Jackson P: Paul Menage -M: pj@sgi.com M: menage@google.com L: linux-kernel@vger.kernel.org W: http://www.bullopensource.org/cpuset/ @@ -1984,7 +1982,7 @@ S: Maintained I2C/SMBUS STUB DRIVER P: Mark M. Hoffman M: mhoffman@lightlink.com -L: lm-sensors@lm-sensors.org +L: i2c@lm-sensors.org S: Maintained I2C SUBSYSTEM @@ -2706,6 +2704,7 @@ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 P: Michael Kerrisk M: mtk.manpages@gmail.com W: http://www.kernel.org/doc/man-pages +L: linux-man@vger.kernel.org S: Supported MARVELL LIBERTAS WIRELESS DRIVER @@ -3726,7 +3725,7 @@ S: Maintained SIS 96X I2C/SMBUS DRIVER P: Mark M. Hoffman M: mhoffman@lightlink.com -L: lm-sensors@lm-sensors.org +L: i2c@lm-sensors.org S: Maintained SIS FRAMEBUFFER DRIVER @@ -3833,11 +3832,12 @@ S: Maintained SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT P: Liam Girdwood -M: liam.girdwood@wolfsonmicro.com +M: lrg@slimlogic.co.uk P: Mark Brown M: broonie@opensource.wolfsonmicro.com T: git opensource.wolfsonmicro.com/linux-2.6-asoc L: alsa-devel@alsa-project.org (subscribers-only) +W: http://alsa-project.org/main/index.php/ASoC S: Supported SPI SUBSYSTEM @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 27 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Rotary Wombat # *DOCUMENTATION* diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index aaffaecffcd1..ba8ccfede964 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -111,8 +111,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, case 'D': case 'k': case 'c': - kgdb_contthread = NULL; - /* * Try to read optional parameter, pc unchanged if no parm. 
* If this was a compiled breakpoint, we need to move diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 67e18509d7bf..b0d6b32654cf 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -17,9 +17,9 @@ #include <linux/interrupt.h> #include <linux/clockchips.h> #include <linux/sched.h> +#include <linux/cnt32_to_63.h> #include <asm/div64.h> -#include <asm/cnt32_to_63.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <mach/pxa-regs.h> diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 1362994c78aa..b422526f6d8b 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -18,9 +18,9 @@ #include <linux/ioport.h> #include <linux/sched.h> /* just for sched_clock() - funny that */ #include <linux/platform_device.h> +#include <linux/cnt32_to_63.h> #include <asm/div64.h> -#include <asm/cnt32_to_63.h> #include <mach/hardware.h> #include <asm/system.h> #include <asm/pgtable.h> diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index d75e795c893e..b638f10411e8 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -28,8 +28,8 @@ #include <linux/amba/clcd.h> #include <linux/clocksource.h> #include <linux/clockchips.h> +#include <linux/cnt32_to_63.h> -#include <asm/cnt32_to_63.h> #include <asm/system.h> #include <mach/hardware.h> #include <asm/io.h> diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c index abcb0d9559b1..65db3d266965 100644 --- a/arch/avr32/boards/atngw100/setup.c +++ b/arch/avr32/boards/atngw100/setup.c @@ -54,8 +54,11 @@ static struct spi_board_info spi0_board_info[] __initdata = { }; static struct mci_platform_data __initdata mci0_data = { - .detect_pin = GPIO_PIN_PC(25), - .wp_pin = GPIO_PIN_PE(0), + .slot[0] = { + .bus_width = 4, + .detect_pin = GPIO_PIN_PC(25), + .wp_pin = GPIO_PIN_PE(0), + }, }; /* diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c index cccca241fae9..32aceec0ae72 100644 --- a/arch/avr32/boards/atstk1000/atstk1002.c +++ b/arch/avr32/boards/atstk1000/atstk1002.c @@ -264,16 +264,20 @@ void __init setup_board(void) #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM +static struct mci_platform_data __initdata mci0_data = { + .slot[0] = { + .bus_width = 4, + /* MMC card detect requires MACB0 *NOT* be used */ #ifdef CONFIG_BOARD_ATSTK1002_SW6_CUSTOM -static struct mci_platform_data __initdata mci0_data = { - .detect_pin = GPIO_PIN_PC(14), /* gpio30/sdcd */ - .wp_pin = GPIO_PIN_PC(15), /* gpio31/sdwp */ -}; -#define MCI_PDATA &mci0_data + .detect_pin = GPIO_PIN_PC(14), /* gpio30/sdcd */ + .wp_pin = GPIO_PIN_PC(15), /* gpio31/sdwp */ #else -#define MCI_PDATA NULL + .detect_pin = -ENODEV, + .wp_pin = -ENODEV, #endif /* SW6 for sd{cd,wp} routing */ + }, +}; #endif /* SW2 for MMC signal routing */ @@ -326,7 +330,7 @@ static int __init atstk1002_init(void) at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); #endif #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM - at32_add_device_mci(0, MCI_PDATA); + at32_add_device_mci(0, &mci0_pdata); #endif #ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM set_hw_addr(at32_add_device_eth(1, ð_data[1])); diff --git a/arch/avr32/boards/atstk1000/atstk1003.c b/arch/avr32/boards/atstk1000/atstk1003.c index 0cf664174c17..acc61235b895 100644 --- a/arch/avr32/boards/atstk1000/atstk1003.c +++ b/arch/avr32/boards/atstk1000/atstk1003.c @@ -66,6 +66,16 @@ static struct spi_board_info spi1_board_info[] __initdata = { { } }; 
#endif +#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM +static struct mci_platform_data __initdata mci0_data = { + .slot[0] = { + .bus_width = 4, + .detect_pin = -ENODEV, + .wp_pin = -ENODEV, + }, +}; +#endif + #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC static void __init atstk1003_setup_extdac(void) { @@ -154,7 +164,7 @@ static int __init atstk1003_init(void) at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); #endif #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM - at32_add_device_mci(0, NULL); + at32_add_device_mci(0, &mci0_data); #endif at32_add_device_usba(0, NULL); #ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM diff --git a/arch/avr32/boards/atstk1000/atstk1004.c b/arch/avr32/boards/atstk1000/atstk1004.c index 2c072cd0c22e..949c13ec51ed 100644 --- a/arch/avr32/boards/atstk1000/atstk1004.c +++ b/arch/avr32/boards/atstk1000/atstk1004.c @@ -71,6 +71,16 @@ static struct spi_board_info spi1_board_info[] __initdata = { { } }; #endif +#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM +static struct mci_platform_data __initdata mci0_data = { + .slot[0] = { + .bus_width = 4, + .detect_pin = -ENODEV, + .wp_pin = -ENODEV, + }, +}; +#endif + #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC static void __init atstk1004_setup_extdac(void) { @@ -137,7 +147,7 @@ static int __init atstk1004_init(void) at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); #endif #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM - at32_add_device_mci(0, NULL); + at32_add_device_mci(0, &mci0_data); #endif at32_add_device_lcdc(0, &atstk1000_lcdc_data, fbmem_start, fbmem_size, diff --git a/arch/avr32/include/asm/atmel-mci.h b/arch/avr32/include/asm/atmel-mci.h index c2ea6e1c9aa1..59f3fadd0b68 100644 --- a/arch/avr32/include/asm/atmel-mci.h +++ b/arch/avr32/include/asm/atmel-mci.h @@ -1,9 +1,39 @@ #ifndef __ASM_AVR32_ATMEL_MCI_H #define __ASM_AVR32_ATMEL_MCI_H -struct mci_platform_data { +#define ATMEL_MCI_MAX_NR_SLOTS 2 + +struct dma_slave; + +/** + * struct mci_slot_pdata - board-specific per-slot configuration + * @bus_width: Number of data lines wired up the slot + * @detect_pin: GPIO pin wired to the card detect switch + * @wp_pin: GPIO pin wired to the write protect sensor + * + * If a given slot is not present on the board, @bus_width should be + * set to 0. The other fields are ignored in this case. + * + * Any pins that aren't available should be set to a negative value. + * + * Note that support for multiple slots is experimental -- some cards + * might get upset if we don't get the clock management exactly right. + * But in most cases, it should work just fine. + */ +struct mci_slot_pdata { + unsigned int bus_width; int detect_pin; int wp_pin; }; +/** + * struct mci_platform_data - board-specific MMC/SDcard configuration + * @dma_slave: DMA slave interface to use in data transfers, or NULL. + * @slot: Per-slot configuration data. 
+ */ +struct mci_platform_data { + struct dma_slave *dma_slave; + struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; +}; + #endif /* __ASM_AVR32_ATMEL_MCI_H */ diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index fd306c49194b..5d00bb8d3cc2 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -1272,10 +1272,14 @@ static struct clk atmel_mci0_pclk = { struct platform_device *__init at32_add_device_mci(unsigned int id, struct mci_platform_data *data) { - struct mci_platform_data _data; struct platform_device *pdev; + struct dw_dma_slave *dws; - if (id != 0) + if (id != 0 || !data) + return NULL; + + /* Must have at least one usable slot */ + if (!data->slot[0].bus_width && !data->slot[1].bus_width) return NULL; pdev = platform_device_alloc("atmel_mci", id); @@ -1286,28 +1290,76 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) ARRAY_SIZE(atmel_mci0_resource))) goto fail; - if (!data) { - data = &_data; - memset(data, -1, sizeof(struct mci_platform_data)); - data->detect_pin = GPIO_PIN_NONE; - data->wp_pin = GPIO_PIN_NONE; - } + if (data->dma_slave) + dws = kmemdup(to_dw_dma_slave(data->dma_slave), + sizeof(struct dw_dma_slave), GFP_KERNEL); + else + dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL); + + dws->slave.dev = &pdev->dev; + dws->slave.dma_dev = &dw_dmac0_device.dev; + dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT; + dws->cfg_hi = (DWC_CFGH_SRC_PER(0) + | DWC_CFGH_DST_PER(1)); + dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL + | DWC_CFGL_HS_SRC_POL); + + data->dma_slave = &dws->slave; if (platform_device_add_data(pdev, data, sizeof(struct mci_platform_data))) goto fail; - select_peripheral(PA(10), PERIPH_A, 0); /* CLK */ - select_peripheral(PA(11), PERIPH_A, 0); /* CMD */ - select_peripheral(PA(12), PERIPH_A, 0); /* DATA0 */ - select_peripheral(PA(13), PERIPH_A, 0); /* DATA1 */ - select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */ - select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */ + /* CLK line is common to both slots */ + select_peripheral(PA(10), PERIPH_A, 0); - if (gpio_is_valid(data->detect_pin)) - at32_select_gpio(data->detect_pin, 0); - if (gpio_is_valid(data->wp_pin)) - at32_select_gpio(data->wp_pin, 0); + switch (data->slot[0].bus_width) { + case 4: + select_peripheral(PA(13), PERIPH_A, 0); /* DATA1 */ + select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */ + select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */ + /* fall through */ + case 1: + select_peripheral(PA(11), PERIPH_A, 0); /* CMD */ + select_peripheral(PA(12), PERIPH_A, 0); /* DATA0 */ + + if (gpio_is_valid(data->slot[0].detect_pin)) + at32_select_gpio(data->slot[0].detect_pin, 0); + if (gpio_is_valid(data->slot[0].wp_pin)) + at32_select_gpio(data->slot[0].wp_pin, 0); + break; + case 0: + /* Slot is unused */ + break; + default: + goto fail; + } + + switch (data->slot[1].bus_width) { + case 4: + select_peripheral(PB(8), PERIPH_B, 0); /* DATA1 */ + select_peripheral(PB(9), PERIPH_B, 0); /* DATA2 */ + select_peripheral(PB(10), PERIPH_B, 0); /* DATA3 */ + /* fall through */ + case 1: + select_peripheral(PB(6), PERIPH_B, 0); /* CMD */ + select_peripheral(PB(7), PERIPH_B, 0); /* DATA0 */ + + if (gpio_is_valid(data->slot[1].detect_pin)) + at32_select_gpio(data->slot[1].detect_pin, 0); + if (gpio_is_valid(data->slot[1].wp_pin)) + at32_select_gpio(data->slot[1].wp_pin, 0); + break; + case 0: + /* Slot is unused */ + break; + default: + if (!data->slot[0].bus_width) + goto fail; + + data->slot[1].bus_width = 0; + break; + } 
atmel_mci0_pclk.dev = &pdev->dev; diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h index f66799891036..1a873b36a4a1 100644 --- a/arch/ia64/include/asm/sections.h +++ b/arch/ia64/include/asm/sections.h @@ -11,6 +11,9 @@ #include <asm-generic/sections.h> extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; +#ifdef CONFIG_SMP +extern char __cpu0_per_cpu[]; +#endif extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; extern char __start___rse_patchlist[], __end___rse_patchlist[]; extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index d45f215bc8fc..51b75cea7018 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -1232,9 +1232,10 @@ efi_initialize_iomem_resources(struct resource *code_resource, if (md->attribute & EFI_MEMORY_WP) { name = "System ROM"; flags |= IORESOURCE_READONLY; - } else { + } else if (md->attribute == EFI_MEMORY_UC) + name = "Uncached RAM"; + else name = "System RAM"; - } break; case EFI_ACPI_MEMORY_NVS: diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 8bdea8eb62e3..66e491d8baac 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -367,16 +367,17 @@ start_ap: ;; #else (isAP) br.few 2f - mov r20=r19 - sub r19=r19,r18 + movl r20=__cpu0_per_cpu ;; shr.u r18=r18,3 1: - ld8 r21=[r20],8;; - st8[r19]=r21,8 + ld8 r21=[r19],8;; + st8[r20]=r21,8 adds r18=-1,r18;; cmp4.lt p7,p6=0,r18 (p7) br.cond.dptk.few 1b + mov r19=r20 + ;; 2: #endif tpa r19=r19 diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index c27d5b2c182b..de636b215677 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -616,7 +616,9 @@ setup_arch (char **cmdline_p) ia64_mca_init(); platform_setup(cmdline_p); +#ifndef CONFIG_IA64_HP_SIM check_sal_cache_flush(); +#endif paging_init(); } diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index de71da811cd6..10a7d47e8510 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -215,9 +215,6 @@ SECTIONS /* Per-cpu data: */ percpu : { } :percpu . = ALIGN(PERCPU_PAGE_SIZE); -#ifdef CONFIG_SMP - . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ -#endif __phys_per_cpu_start = .; .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) { @@ -233,6 +230,11 @@ SECTIONS data : { } :data .data : AT(ADDR(.data) - LOAD_OFFSET) { +#ifdef CONFIG_SMP + . = ALIGN(PERCPU_PAGE_SIZE); + __cpu0_per_cpu = .; + . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ +#endif DATA_DATA *(.data1) *(.gnu.linkonce.d*) diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index e566ff43884a..0ee085efbe29 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -163,7 +163,7 @@ per_cpu_init (void) * get_zeroed_page(). 
*/ if (first_time) { - void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; + void *cpu0_data = __cpu0_per_cpu; first_time=0; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 78026aabaa7f..d8c5fcd89e5b 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node) for_each_possible_early_cpu(cpu) { if (cpu == 0) { - void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; + void *cpu0_data = __cpu0_per_cpu; __per_cpu_offset[cpu] = (char*)cpu0_data - __per_cpu_start; } else if (node == node_cpuid[cpu].nid) { diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index a5f864c445b2..f57113f1f892 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -216,10 +216,6 @@ config MEMORY_SIZE default "01000000" if PLAT_M32104UT default "00800000" if PLAT_OAKS32R -config NOHIGHMEM - bool - default y - config ARCH_DISCONTIGMEM_ENABLE bool "Internal RAM Support" depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104 @@ -410,11 +406,7 @@ config PCI_DIRECT source "drivers/pci/Kconfig" config ISA - bool "ISA support" - help - Find out whether you have ISA slots on your motherboard. ISA is the - name of a bus system, i.e. the way the CPU talks to the other stuff - inside your box. If you have ISA, say Y, otherwise N. + bool source "drivers/pcmcia/Kconfig" diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index d4eaa2fd1818..612d35b082a6 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S @@ -143,7 +143,7 @@ ret_from_intr: and3 r4, r4, #0x8000 ; check BSM bit #endif beqz r4, resume_kernel -ENTRY(resume_userspace) +resume_userspace: DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt ; setting need_resched or sigpending ; between sampling and the iret diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S index dab7436d7bbe..40180778a5c7 100644 --- a/arch/m32r/kernel/head.S +++ b/arch/m32r/kernel/head.S @@ -29,7 +29,6 @@ __INITDATA .global _end ENTRY(stext) ENTRY(_stext) -ENTRY(startup_32) /* Setup up the stack pointer */ LDIMM (r0, spi_stack_top) LDIMM (r1, spu_stack_top) diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index d0c5b0b7da2f..2aeae4670098 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -22,9 +22,6 @@ #include <linux/module.h> #include <asm/uaccess.h> -atomic_t irq_err_count; -atomic_t irq_mis_count; - /* * Generic, controller-independent functions: */ @@ -63,9 +60,6 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } else if (i == NR_IRQS) { - seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); - seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); } return 0; } diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c index 16bcb189a383..22624b51d4d3 100644 --- a/arch/m32r/kernel/m32r_ksyms.c +++ b/arch/m32r/kernel/m32r_ksyms.c @@ -14,6 +14,7 @@ #include <asm/delay.h> #include <asm/irq.h> #include <asm/tlbflush.h> +#include <asm/pgtable.h> /* platform dependent support */ EXPORT_SYMBOL(boot_cpu_data); @@ -65,6 +66,7 @@ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(_inb); EXPORT_SYMBOL(_inw); diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index a689e2978b6e..5be4faaf5b1c 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ 
-35,8 +35,6 @@ #include <linux/err.h> -static int hlt_counter=0; - /* * Return saved PC of a blocked thread. */ @@ -48,31 +46,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk) /* * Powermanagement idle function, if any.. */ -void (*pm_idle)(void) = NULL; -EXPORT_SYMBOL(pm_idle); +static void (*pm_idle)(void) = NULL; void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); -void disable_hlt(void) -{ - hlt_counter++; -} - -EXPORT_SYMBOL(disable_hlt); - -void enable_hlt(void) -{ - hlt_counter--; -} - -EXPORT_SYMBOL(enable_hlt); - /* * We use this is we don't have any better * idle routine.. */ -void default_idle(void) +static void default_idle(void) { /* M32R_FIXME: Please use "cpu_sleep" mode. */ cpu_relax(); @@ -260,15 +243,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long spu, return 0; } -/* - * Capture the user space registers if the task is not running (in user space) - */ -int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) -{ - /* M32R_FIXME */ - return 1; -} - asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2, unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, struct pt_regs regs) diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 7577f971ea4e..929e5c9d3ad9 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -84,7 +84,7 @@ void smp_send_timer(void); void smp_ipi_timer_interrupt(struct pt_regs *); void smp_local_timer_interrupt(void); -void send_IPI_allbutself(int, int); +static void send_IPI_allbutself(int, int); static void send_IPI_mask(cpumask_t, int, int); unsigned long send_IPI_mask_phys(cpumask_t, int, int); @@ -722,7 +722,7 @@ void smp_local_timer_interrupt(void) * ---------- --- -------------------------------------------------------- * *==========================================================================*/ -void send_IPI_allbutself(int ipi_num, int try) +static void send_IPI_allbutself(int ipi_num, int try) { cpumask_t cpumask; diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index 994cc1556355..6ea017727cce 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -34,7 +34,6 @@ #include <asm/hw_irq.h> #ifdef CONFIG_SMP -extern void send_IPI_allbutself(int, int); extern void smp_local_timer_interrupt(void); #endif @@ -188,7 +187,7 @@ static long last_rtc_update = 0; * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick */ -irqreturn_t timer_interrupt(int irq, void *dev_id) +static irqreturn_t timer_interrupt(int irq, void *dev_id) { #ifndef CONFIG_SMP profile_tick(CPU_PROFILING); @@ -228,7 +227,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -struct irqaction irq0 = { +static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED, .mask = CPU_MASK_NONE, diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 46159a4e644b..03b14e55cd89 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -61,7 +61,7 @@ extern unsigned long eit_vector[]; ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \ + 0xff000000UL -void set_eit_vector_entries(void) +static void set_eit_vector_entries(void) { extern void default_eit_handler(void); extern void system_call(void); @@ -121,9 +121,9 @@ void __init trap_init(void) cpu_init(); } -int kstack_depth_to_print = 24; +static int kstack_depth_to_print = 24; -void show_trace(struct task_struct *task, unsigned long *stack) +static void 
show_trace(struct task_struct *task, unsigned long *stack) { unsigned long addr; @@ -224,7 +224,7 @@ bad: printk("\n"); } -DEFINE_SPINLOCK(die_lock); +static DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) { diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c index 59bfc34e0d9f..ced549be80f5 100644 --- a/arch/m32r/lib/delay.c +++ b/arch/m32r/lib/delay.c @@ -6,6 +6,7 @@ */ #include <linux/param.h> +#include <linux/module.h> #ifdef CONFIG_SMP #include <linux/sched.h> #include <asm/current.h> @@ -121,3 +122,4 @@ void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } +EXPORT_SYMBOL(__ndelay); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 49896a2a1d72..c930b8ceb418 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC depends on CPU_MIPS32_R2 #depends on CPU_MIPS64_R2 # once there is hardware ... depends on SYS_SUPPORTS_MULTITHREADING - select GENERIC_CLOCKEVENTS_BROADCAST select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select MIPS_MT @@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER Includes a loader for loading an elf relocatable object onto another VPE and running it. -config MIPS_MT_SMTC_INSTANT_REPLAY - bool "Low-latency Dispatch of Deferred SMTC IPIs" - depends on MIPS_MT_SMTC && !PREEMPT - default y - help - SMTC pseudo-interrupts between TCs are deferred and queued - if the target TC is interrupt-inhibited (IXMT). In the first - SMTC prototypes, these queued IPIs were serviced on return - to user mode, or on entry into the kernel idle loop. The - INSTANT_REPLAY option dispatches them as part of local_irq_restore() - processing, which adds runtime overhead (hence the option to turn - it off), but ensures that IPIs are handled promptly even under - heavy I/O interrupt load. - config MIPS_MT_SMTC_IM_BACKSTOP bool "Use per-TC register bits as backstop for inhibited IM bits" depends on MIPS_MT_SMTC - default y + default n help To support multiple TC microthreads acting as "CPUs" within a VPE, VPE-wide interrupt mask bits must be specially manipulated during interrupt handling. To support legacy drivers and interrupt controller management code, SMTC has a "backstop" to track and if necessary restore the interrupt mask. This has some performance - impact on interrupt service overhead. Disable it only if you know - what you are doing. + impact on interrupt service overhead. config MIPS_MT_SMTC_IRQAFF bool "Support IRQ affinity API" @@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) for SMTC Linux kernel. Requires platform support, of which an example can be found in the MIPS kernel i8259 and Malta - platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY - be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to - interrupt dispatch, and should be used only if you know what - you are doing. + platform code. Adds some overhead to interrupt dispatch, and + should be used only if you know what you are doing. 
config MIPS_VPE_LOADER_TOM bool "Load VPE program into memory hidden from linux" diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c index b485d94ce8a5..e660ddd611c4 100644 --- a/arch/mips/au1000/common/gpio.c +++ b/arch/mips/au1000/common/gpio.c @@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value) { gpio -= AU1XXX_GPIO_BASE; - gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio); + gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); } static int au1xxx_gpio2_direction_input(unsigned gpio) @@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio) static int au1xxx_gpio2_direction_output(unsigned gpio, int value) { gpio -= AU1XXX_GPIO_BASE; - gpio2->dir = (0x01 << gpio) | (value << gpio); + gpio2->dir |= 0x01 << gpio; + gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); return 0; } @@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio) static int au1xxx_gpio1_direction_output(unsigned gpio, int value) { gpio1->trioutclr = (0x01 & gpio); + au1xxx_gpio1_write(gpio, value); return 0; } diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 706f93974797..25775cb54000 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o +obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 24a2d907aa0d..4a4c59f2737a 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -12,6 +12,14 @@ #include <asm/smtc_ipi.h> #include <asm/time.h> +#include <asm/cevt-r4k.h> + +/* + * The SMTC Kernel for the 34K, 1004K, et. al. replaces several + * of these routines with SMTC-specific variants. + */ + +#ifndef CONFIG_MIPS_MT_SMTC static int mips_next_event(unsigned long delta, struct clock_event_device *evt) @@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta, unsigned int cnt; int res; -#ifdef CONFIG_MIPS_MT_SMTC - { - unsigned long flags, vpflags; - local_irq_save(flags); - vpflags = dvpe(); -#endif cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; -#ifdef CONFIG_MIPS_MT_SMTC - evpe(vpflags); - local_irq_restore(flags); - } -#endif return res; } -static void mips_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +#endif /* CONFIG_MIPS_MT_SMTC */ + +void mips_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) { /* Nothing to do ... */ } -static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); -static int cp0_timer_irq_installed; +DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); +int cp0_timer_irq_installed; -/* - * Timer ack for an R4k-compatible timer of a known frequency. - */ -static void c0_timer_ack(void) -{ - write_c0_compare(read_c0_compare()); -} +#ifndef CONFIG_MIPS_MT_SMTC -/* - * Possibly handle a performance counter interrupt. - * Return true if the timer interrupt should not be checked - */ -static inline int handle_perf_irq(int r2) -{ - /* - * The performance counter overflow interrupt may be shared with the - * timer interrupt (cp0_perfcount_irq < 0). 
If it is and a - * performance counter has overflowed (perf_irq() == IRQ_HANDLED) - * and we can't reliably determine if a counter interrupt has also - * happened (!r2) then don't check for a timer interrupt. - */ - return (cp0_perfcount_irq < 0) && - perf_irq() == IRQ_HANDLED && - !r2; -} - -static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; struct clock_event_device *cd; @@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) * interrupt. Being the paranoiacs we are we check anyway. */ if (!r2 || (read_c0_cause() & (1 << 30))) { - c0_timer_ack(); -#ifdef CONFIG_MIPS_MT_SMTC - if (cpu_data[cpu].vpe_id) - goto out; - cpu = 0; -#endif + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); cd->event_handler(cd); } @@ -107,65 +78,16 @@ out: return IRQ_HANDLED; } -static struct irqaction c0_compare_irqaction = { +#endif /* Not CONFIG_MIPS_MT_SMTC */ + +struct irqaction c0_compare_irqaction = { .handler = c0_compare_interrupt, -#ifdef CONFIG_MIPS_MT_SMTC - .flags = IRQF_DISABLED, -#else .flags = IRQF_DISABLED | IRQF_PERCPU, -#endif .name = "timer", }; -#ifdef CONFIG_MIPS_MT_SMTC -DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); - -static void smtc_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ -} - -static void mips_broadcast(cpumask_t mask) -{ - unsigned int cpu; - - for_each_cpu_mask(cpu, mask) - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); -} - -static void setup_smtc_dummy_clockevent_device(void) -{ - //uint64_t mips_freq = mips_hpt_^frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - - cd->name = "SMTC"; - cd->features = CLOCK_EVT_FEAT_DUMMY; - - /* Calculate the min / max delta */ - cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 0; //32; - cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); - - cd->rating = 200; - cd->irq = 17; //-1; -// if (cpu) -// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); -// else - cd->cpumask = cpumask_of_cpu(cpu); - - cd->set_mode = smtc_set_mode; - - cd->broadcast = mips_broadcast; - - clockevents_register_device(cd); -} -#endif - -static void mips_event_handler(struct clock_event_device *dev) +void mips_event_handler(struct clock_event_device *dev) { } @@ -177,7 +99,23 @@ static int c0_compare_int_pending(void) return (read_c0_cause() >> cp0_compare_irq) & 0x100; } -static int c0_compare_int_usable(void) +/* + * Compare interrupt can be routed and latched outside the core, + * so a single execution hazard barrier may not be enough to give + * it time to clear as seen in the Cause register. 4 time the + * pipeline depth seems reasonably conservative, and empirically + * works better in configurations with high CPU/bus clock ratios. 
+ */ + +#define compare_change_hazard() \ + do { \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + } while (0) + +int c0_compare_int_usable(void) { unsigned int delta; unsigned int cnt; @@ -187,7 +125,7 @@ static int c0_compare_int_usable(void) */ if (c0_compare_int_pending()) { write_c0_compare(read_c0_count()); - irq_disable_hazard(); + compare_change_hazard(); if (c0_compare_int_pending()) return 0; } @@ -196,7 +134,7 @@ static int c0_compare_int_usable(void) cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); - irq_disable_hazard(); + compare_change_hazard(); if ((int)(read_c0_count() - cnt) < 0) break; /* increase delta if the timer was already expired */ @@ -205,11 +143,12 @@ static int c0_compare_int_usable(void) while ((int)(read_c0_count() - cnt) <= 0) ; /* Wait for expiry */ + compare_change_hazard(); if (!c0_compare_int_pending()) return 0; write_c0_compare(read_c0_count()); - irq_disable_hazard(); + compare_change_hazard(); if (c0_compare_int_pending()) return 0; @@ -219,6 +158,8 @@ static int c0_compare_int_usable(void) return 1; } +#ifndef CONFIG_MIPS_MT_SMTC + int __cpuinit mips_clockevent_init(void) { uint64_t mips_freq = mips_hpt_frequency; @@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void) if (!cpu_has_counter || !mips_hpt_frequency) return -ENXIO; -#ifdef CONFIG_MIPS_MT_SMTC - setup_smtc_dummy_clockevent_device(); - - /* - * On SMTC we only register VPE0's compare interrupt as clockevent - * device. - */ - if (cpu) - return 0; -#endif - if (!c0_compare_int_usable()) return -ENXIO; @@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void) cd->rating = 300; cd->irq = irq; -#ifdef CONFIG_MIPS_MT_SMTC - cd->cpumask = CPU_MASK_ALL; -#else cd->cpumask = cpumask_of_cpu(cpu); -#endif cd->set_next_event = mips_next_event; - cd->set_mode = mips_set_mode; + cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; clockevents_register_device(cd); @@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void) cp0_timer_irq_installed = 1; -#ifdef CONFIG_MIPS_MT_SMTC -#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) - setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); -#else setup_irq(irq, &c0_compare_irqaction); -#endif return 0; } + +#endif /* Not CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c new file mode 100644 index 000000000000..5162fe4b5952 --- /dev/null +++ b/arch/mips/kernel/cevt-smtc.c @@ -0,0 +1,321 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 MIPS Technologies, Inc. + * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> + +#include <asm/smtc_ipi.h> +#include <asm/time.h> +#include <asm/cevt-r4k.h> + +/* + * Variant clock event timer support for SMTC on MIPS 34K, 1004K + * or other MIPS MT cores. + * + * Notes on SMTC Support: + * + * SMTC has multiple microthread TCs pretending to be Linux CPUs. + * But there's only one Count/Compare pair per VPE, and Compare + * interrupts are taken opportunisitically by available TCs + * bound to the VPE with the Count register. The new timer + * framework provides for global broadcasts, but we really + * want VPE-level multicasts for best behavior. 
So instead + * of invoking the high-level clock-event broadcast code, + * this version of SMTC support uses the historical SMTC + * multicast mechanisms "under the hood", appearing to the + * generic clock layer as if the interrupts are per-CPU. + * + * The approach taken here is to maintain a set of NR_CPUS + * virtual timers, and track which "CPU" needs to be alerted + * at each event. + * + * It's unlikely that we'll see a MIPS MT core with more than + * 2 VPEs, but we *know* that we won't need to handle more + * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements + * is always going to be overkill, but always going to be enough. + */ + +unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; +static int smtc_nextinvpe[NR_CPUS]; + +/* + * Timestamps stored are absolute values to be programmed + * into Count register. Valid timestamps will never be zero. + * If a Zero Count value is actually calculated, it is converted + * to be a 1, which will introduce 1 or two CPU cycles of error + * roughly once every four billion events, which at 1000 HZ means + * about once every 50 days. If that's actually a problem, one + * could alternate squashing 0 to 1 and to -1. + */ + +#define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) +#define ISVALID(x) ((x) != 0L) + +/* + * Time comparison is subtle, as it's really truncated + * modular arithmetic. + */ + +#define IS_SOONER(a, b, reference) \ + (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) + +/* + * CATCHUP_INCREMENT, used when the function falls behind the counter. + * Could be an increasing function instead of a constant; + */ + +#define CATCHUP_INCREMENT 64 + +static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned long flags; + unsigned int mtflags; + unsigned long timestamp, reference, previous; + unsigned long nextcomp = 0L; + int vpe = current_cpu_data.vpe_id; + int cpu = smp_processor_id(); + local_irq_save(flags); + mtflags = dmt(); + + /* + * Maintain the per-TC virtual timer + * and program the per-VPE shared Count register + * as appropriate here... + */ + reference = (unsigned long)read_c0_count(); + timestamp = MAKEVALID(reference + delta); + /* + * To really model the clock, we have to catch the case + * where the current next-in-VPE timestamp is the old + * timestamp for the calling CPE, but the new value is + * in fact later. In that case, we have to do a full + * scan and discover the new next-in-VPE CPU id and + * timestamp. + */ + previous = smtc_nexttime[vpe][cpu]; + if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) + && IS_SOONER(previous, timestamp, reference)) { + int i; + int soonest = cpu; + + /* + * Update timestamp array here, so that new + * value gets considered along with those of + * other virtual CPUs on the VPE. + */ + smtc_nexttime[vpe][cpu] = timestamp; + for_each_online_cpu(i) { + if (ISVALID(smtc_nexttime[vpe][i]) + && IS_SOONER(smtc_nexttime[vpe][i], + smtc_nexttime[vpe][soonest], reference)) { + soonest = i; + } + } + smtc_nextinvpe[vpe] = soonest; + nextcomp = smtc_nexttime[vpe][soonest]; + /* + * Otherwise, we don't have to process the whole array rank, + * we just have to see if the event horizon has gotten closer. + */ + } else { + if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || + IS_SOONER(timestamp, + smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { + smtc_nextinvpe[vpe] = cpu; + nextcomp = timestamp; + } + /* + * Since next-in-VPE may me the same as the executing + * virtual CPU, we update the array *after* checking + * its value. 
+ */ + smtc_nexttime[vpe][cpu] = timestamp; + } + + /* + * It may be that, in fact, we don't need to update Compare, + * but if we do, we want to make sure we didn't fall into + * a crack just behind Count. + */ + if (ISVALID(nextcomp)) { + write_c0_compare(nextcomp); + ehb(); + /* + * We never return an error, we just make sure + * that we trigger the handlers as quickly as + * we can if we fell behind. + */ + while ((nextcomp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) { + nextcomp += CATCHUP_INCREMENT; + write_c0_compare(nextcomp); + ehb(); + } + } + emt(mtflags); + local_irq_restore(flags); + return 0; +} + + +void smtc_distribute_timer(int vpe) +{ + unsigned long flags; + unsigned int mtflags; + int cpu; + struct clock_event_device *cd; + unsigned long nextstamp = 0L; + unsigned long reference; + + +repeat: + for_each_online_cpu(cpu) { + /* + * Find virtual CPUs within the current VPE who have + * unserviced timer requests whose time is now past. + */ + local_irq_save(flags); + mtflags = dmt(); + if (cpu_data[cpu].vpe_id == vpe && + ISVALID(smtc_nexttime[vpe][cpu])) { + reference = (unsigned long)read_c0_count(); + if ((smtc_nexttime[vpe][cpu] - reference) + > (unsigned long)LONG_MAX) { + smtc_nexttime[vpe][cpu] = 0L; + emt(mtflags); + local_irq_restore(flags); + /* + * We don't send IPIs to ourself. + */ + if (cpu != smp_processor_id()) { + smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); + } else { + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + } + } else { + /* Local to VPE but Valid Time not yet reached. */ + if (!ISVALID(nextstamp) || + IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, + reference)) { + smtc_nextinvpe[vpe] = cpu; + nextstamp = smtc_nexttime[vpe][cpu]; + } + emt(mtflags); + local_irq_restore(flags); + } + } else { + emt(mtflags); + local_irq_restore(flags); + + } + } + /* Reprogram for interrupt at next soonest timestamp for VPE */ + if (ISVALID(nextstamp)) { + write_c0_compare(nextstamp); + ehb(); + if ((nextstamp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) + goto repeat; + } +} + + +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +{ + int cpu = smp_processor_id(); + + /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ + handle_perf_irq(1); + + if (read_c0_cause() & (1 << 30)) { + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); + smtc_distribute_timer(cpu_data[cpu].vpe_id); + } + return IRQ_HANDLED; +} + + +int __cpuinit mips_clockevent_init(void) +{ + uint64_t mips_freq = mips_hpt_frequency; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + int i; + int j; + + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + if (cpu == 0) { + for (i = 0; i < num_possible_cpus(); i++) { + smtc_nextinvpe[i] = 0; + for (j = 0; j < num_possible_cpus(); j++) + smtc_nexttime[i][j] = 0L; + } + /* + * SMTC also can't have the usablility test + * run by secondary TCs once Compare is in use. + */ + if (!c0_compare_int_usable()) + return -ENXIO; + } + + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of it's liking. 
+ */ + irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; + if (get_c0_compare_int) + irq = get_c0_compare_int(); + + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + /* Calculate the min / max delta */ + cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); + cd->shift = 32; + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of_cpu(cpu); + cd->set_next_event = mips_next_event; + cd->set_mode = mips_set_clock_mode; + cd->event_handler = mips_event_handler; + + clockevents_register_device(cd); + + /* + * On SMTC we only want to do the data structure + * initialization and IRQ setup once. + */ + if (cpu) + return 0; + /* + * And we need the hwmask associated with the c0_compare + * vector to be initialized. + */ + irq_hwmask[irq] = (0x100 << cp0_compare_irq); + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); + + return 0; +} diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 11c92dc53791..e621fda8ab37 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -54,14 +54,18 @@ extern void r4k_wait(void); * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes * using this version a gamble. */ -static void r4k_wait_irqoff(void) +void r4k_wait_irqoff(void) { local_irq_disable(); if (!need_resched()) - __asm__(" .set mips3 \n" + __asm__(" .set push \n" + " .set mips3 \n" " wait \n" - " .set mips0 \n"); + " .set pop \n"); local_irq_enable(); + __asm__(" .globl __pastwait \n" + "__pastwait: \n"); + return; } /* diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e29598ae939d..ffa331029e08 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -79,11 +79,6 @@ FEXPORT(syscall_exit) FEXPORT(restore_all) # restore full frame #ifdef CONFIG_MIPS_MT_SMTC -/* Detect and execute deferred IPI "interrupts" */ - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - jal deferred_smtc_ipi - LONG_S s0, TI_REGS($28) #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP /* Re-arm any temporarily masked interrupts not explicitly "acked" */ mfc0 v0, CP0_TCSTATUS @@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame xor t0, t0, t3 mtc0 t0, CP0_TCCONTEXT #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ +/* Detect and execute deferred IPI "interrupts" */ + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + jal deferred_smtc_ipi + LONG_S s0, TI_REGS($28) #endif /* CONFIG_MIPS_MT_SMTC */ .set noat RESTORE_TEMP diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index f886dd7f708e..01dcbe38fa01 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp) and t0, a0, t1 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP mfc0 t2, CP0_TCCONTEXT - or t0, t0, t2 - mtc0 t0, CP0_TCCONTEXT + or t2, t0, t2 + mtc0 t2, CP0_TCCONTEXT #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ xor t1, t1, t0 mtc0 t1, CP0_STATUS diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 8f6d58ede33c..6e152c80cd4a 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, atomic_set(&kgdb_cpu_doing_single_step, -1); if (remcom_in_buffer[0] == 's') - if (kgdb_contthread) - atomic_set(&kgdb_cpu_doing_single_step, cpu); + 
atomic_set(&kgdb_cpu_doing_single_step, cpu); return 0; } diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index df4d3f2f740c..dc9eb72ed9de 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh); /* * FPU Use Factor empirically derived from experiments on 34K */ -#define FPUSEFACTOR 333 +#define FPUSEFACTOR 2000 static __init int mt_fp_affinity_init(void) { diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ce7684335a41..22fc19bbe87f 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -55,7 +55,7 @@ void __noreturn cpu_idle(void) while (1) { tick_nohz_stop_sched_tick(1); while (!need_resched()) { -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG +#ifdef CONFIG_MIPS_MT_SMTC extern void smtc_idle_loop_hook(void); smtc_idle_loop_hook(); @@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC restores TCStatus after Status, and the CU bits + * are aliased there. + */ + childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); +#endif clear_tsk_thread_flag(p, TIF_USEDFPU); #ifdef CONFIG_MIPS_MT_FPAFF clear_tsk_thread_flag(p, TIF_FPUBOUND); - - /* - * FPU affinity support is cleaner if we track the - * user-visible CPU affinity from the very beginning. - * The generic cpus_allowed mask will already have - * been copied from the parent before copy_thread - * is invoked. - */ - p->thread.user_cpus_allowed = p->cpus_allowed; #endif /* CONFIG_MIPS_MT_FPAFF */ if (clone_flags & CLONE_SETTLS) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 35234b92b9a5..96ffc9c6d194 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) case FPC_EIR: { /* implementation / version register */ unsigned int flags; #ifdef CONFIG_MIPS_MT_SMTC - unsigned int irqflags; + unsigned long irqflags; unsigned int mtflags; #endif /* CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a516286532ab..897fb2b4751c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -1,4 +1,21 @@ -/* Copyright (C) 2004 Mips Technologies, Inc */ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2004 Mips Technologies, Inc + * Copyright (C) 2008 Kevin D. 
Kissell + */ #include <linux/clockchips.h> #include <linux/kernel.h> @@ -21,7 +38,6 @@ #include <asm/time.h> #include <asm/addrspace.h> #include <asm/smtc.h> -#include <asm/smtc_ipi.h> #include <asm/smtc_proc.h> /* @@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; -/* - * Clock interrupt "latch" buffers, per "CPU" - */ - -static atomic_t ipi_timer_latch[NR_CPUS]; /* * Number of InterProcessor Interrupt (IPI) message buffers to allocate @@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS]; #define IPIBUF_PER_CPU 4 -static struct smtc_ipi_q IPIQ[NR_CPUS]; +struct smtc_ipi_q IPIQ[NR_CPUS]; static struct smtc_ipi_q freeIPIq; @@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) * phys_cpu_present_map and the logical/physical mappings. */ -int __init mipsmt_build_cpu_map(int start_cpu_slot) +int __init smtc_build_cpu_map(int start_cpu_slot) { int i, ntcs; @@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A); - write_tc_c0_tccontext(0); + /* + * TCContext gets an offset from the base of the IPIQ array + * to be used in low-level code to detect the presence of + * an active IPI queue + */ + write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); /* Bind tc to vpe */ write_tc_c0_tcbind(vpe); /* In general, all TCs should have the same cpu_data indications */ @@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) cpu_data[cpu].options &= ~MIPS_CPU_FPU; cpu_data[cpu].vpe_id = vpe; cpu_data[cpu].tc_id = tc; + /* Multi-core SMTC hasn't been tested, but be prepared */ + cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; } +/* + * Tweak to get Count registes in as close a sync as possible. + * Value seems good for 34K-class cores. + */ + +#define CP0_SKEW 8 -void mipsmt_prepare_cpus(void) +void smtc_prepare_cpus(int cpus) { int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; unsigned long flags; @@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) IPIQ[i].head = IPIQ[i].tail = NULL; spin_lock_init(&IPIQ[i].lock); IPIQ[i].depth = 0; - atomic_set(&ipi_timer_latch[i], 0); } /* cpu_data index starts at zero */ cpu = 0; cpu_data[cpu].vpe_id = 0; cpu_data[cpu].tc_id = 0; + cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; cpu++; /* Report on boot-time options */ @@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) write_vpe_c0_compare(0); /* Propagate Config7 */ write_vpe_c0_config7(read_c0_config7()); - write_vpe_c0_count(read_c0_count()); + write_vpe_c0_count(read_c0_count() + CP0_SKEW); + ehb(); } /* enable multi-threading within VPE */ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); @@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void) void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) { extern u32 kernelsp[NR_CPUS]; - long flags; + unsigned long flags; int mtflags; LOCK_MT_PRA(); @@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) void smtc_init_secondary(void) { - /* - * Start timer on secondary VPEs if necessary. - * plat_timer_setup has already have been invoked by init/main - * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that - * SMTC init code assigns TCs consdecutively and in ascending order - * to across available VPEs. 
- */ - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) - != cpu_data[smp_processor_id() - 1].vpe_id)){ - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); - } - local_irq_enable(); } void smtc_smp_finish(void) { + int cpu = smp_processor_id(); + + /* + * Lowest-numbered CPU per VPE starts a clock tick. + * Like per_cpu_trap_init() hack, this assumes that + * SMTC init code assigns TCs consdecutively and + * in ascending order across available VPEs. + */ + if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) + write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); + printk("TC %d going on-line as CPU %d\n", cpu_data[smp_processor_id()].tc_id, smp_processor_id()); } @@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) { int tcstatus; struct smtc_ipi *pipi; - long flags; + unsigned long flags; int mtflags; + unsigned long tcrestart; + extern void r4k_wait_irqoff(void), __pastwait(void); if (cpu == smp_processor_id()) { printk("Cannot Send IPI to self!\n"); @@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) pipi->arg = (void *)action; pipi->dest = cpu; if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - if (type == SMTC_CLOCK_TICK) - atomic_inc(&ipi_timer_latch[cpu]); /* If not on same VPE, enqueue and send cross-VPE interrupt */ smtc_ipi_nq(&IPIQ[cpu], pipi); LOCK_CORE_PRA(); @@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) if ((tcstatus & TCSTATUS_IXMT) != 0) { /* - * Spin-waiting here can deadlock, - * so we queue the message for the target TC. + * If we're in the the irq-off version of the wait + * loop, we need to force exit from the wait and + * do a direct post of the IPI. + */ + if (cpu_wait == r4k_wait_irqoff) { + tcrestart = read_tc_c0_tcrestart(); + if (tcrestart >= (unsigned long)r4k_wait_irqoff + && tcrestart < (unsigned long)__pastwait) { + write_tc_c0_tcrestart(__pastwait); + tcstatus &= ~TCSTATUS_IXMT; + write_tc_c0_tcstatus(tcstatus); + goto postdirect; + } + } + /* + * Otherwise we queue the message for the target TC + * to pick up when he does a local_irq_restore() */ write_tc_c0_tchalt(0); UNLOCK_CORE_PRA(); - /* Try to reduce redundant timer interrupt messages */ - if (type == SMTC_CLOCK_TICK) { - if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ - smtc_ipi_nq(&freeIPIq, pipi); - return; - } - } smtc_ipi_nq(&IPIQ[cpu], pipi); } else { - if (type == SMTC_CLOCK_TICK) - atomic_inc(&ipi_timer_latch[cpu]); +postdirect: post_direct_ipi(cpu, pipi); write_tc_c0_tchalt(0); UNLOCK_CORE_PRA(); @@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) smp_call_function_interrupt(); } -DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); +DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void ipi_decode(struct smtc_ipi *pipi) { @@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) struct clock_event_device *cd; void *arg_copy = pipi->arg; int type_copy = pipi->type; - int ticks; - smtc_ipi_nq(&freeIPIq, pipi); switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - ticks = atomic_read(&ipi_timer_latch[cpu]); - atomic_sub(ticks, &ipi_timer_latch[cpu]); - while (ticks) { - cd->event_handler(cd); - ticks--; - } + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); irq_exit(); break; @@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) } } +/* + * 
Similar to smtc_ipi_replay(), but invoked from context restore, + * so it reuses the current exception frame rather than set up a + * new one with self_ipi. + */ + void deferred_smtc_ipi(void) { - struct smtc_ipi *pipi; - unsigned long flags; -/* DEBUG */ - int q = smp_processor_id(); + int cpu = smp_processor_id(); /* * Test is not atomic, but much faster than a dequeue, * and the vast majority of invocations will have a null queue. + * If irq_disabled when this was called, then any IPIs queued + * after we test last will be taken on the next irq_enable/restore. + * If interrupts were enabled, then any IPIs added after the + * last test will be taken directly. */ - if (IPIQ[q].head != NULL) { - while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { - /* ipi_decode() should be called with interrupts off */ - local_irq_save(flags); + + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It may be possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + if (pipi != NULL) ipi_decode(pipi); - local_irq_restore(flags); - } + /* + * The use of the __raw_local restore isn't + * as obviously necessary here as in smtc_ipi_replay(), + * but it's more efficient, given that we're already + * running down the IPI queue. + */ + __raw_local_irq_restore(flags); } } @@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) struct smtc_ipi *pipi; unsigned long tcstatus; int sent; - long flags; + unsigned long flags; unsigned int mtflags; unsigned int vpflags; @@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) /* * SMTC-specific hacks invoked from elsewhere in the kernel. - * - * smtc_ipi_replay is called from raw_local_irq_restore which is only ever - * called with interrupts disabled. We do rely on interrupts being disabled - * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would - * result in a recursive call to raw_local_irq_restore(). */ -static void __smtc_ipi_replay(void) + /* + * smtc_ipi_replay is called from raw_local_irq_restore + */ + +void smtc_ipi_replay(void) { unsigned int cpu = smp_processor_id(); /* * To the extent that we've ever turned interrupts off, * we may have accumulated deferred IPIs. This is subtle. - * If we use the smtc_ipi_qdepth() macro, we'll get an - * exact number - but we'll also disable interrupts - * and create a window of failure where a new IPI gets - * queued after we test the depth but before we re-enable - * interrupts. So long as IXMT never gets set, however, * we should be OK: If we pick up something and dispatch * it here, that's great. If we see nothing, but concurrent * with this operation, another TC sends us an IPI, IXMT * is clear, and we'll handle it as a real pseudo-interrupt - * and not a pseudo-pseudo interrupt. + * and not a pseudo-pseudo interrupt. The important thing + * is to do the last check for queued message *after* the + * re-enabling of interrupts. */ - if (IPIQ[cpu].depth > 0) { - while (1) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - extern void self_ipi(struct smtc_ipi *); - - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - if (!pipi) - break; + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It's just possible we'll come in with interrupts + * already enabled. 
+ */ + local_irq_save(flags); + + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + /* + ** But use a raw restore here to avoid recursion. + */ + __raw_local_irq_restore(flags); + if (pipi) { self_ipi(pipi); smtc_cpu_stats[cpu].selfipis++; } } } -void smtc_ipi_replay(void) -{ - raw_local_irq_disable(); - __smtc_ipi_replay(); -} - EXPORT_SYMBOL(smtc_ipi_replay); void smtc_idle_loop_hook(void) @@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) } } - /* - * Now that we limit outstanding timer IPIs, check for hung TC - */ - for (tc = 0; tc < NR_CPUS; tc++) { - /* Don't check ourself - we'll dequeue IPIs just below */ - if ((tc != smp_processor_id()) && - atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { - if (clock_hang_reported[tc] == 0) { - pdb_msg += sprintf(pdb_msg, - "TC %d looks hung with timer latch at %d\n", - tc, atomic_read(&ipi_timer_latch[tc])); - clock_hang_reported[tc]++; - } - } - } emt(mtflags); local_irq_restore(flags); if (pdb_msg != &id_ho_db_msg[0]) printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - /* - * Replay any accumulated deferred IPIs. If "Instant Replay" - * is in use, there should never be any. - */ -#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY - { - unsigned long flags; - - local_irq_save(flags); - __smtc_ipi_replay(); - local_irq_restore(flags); - } -#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ + smtc_ipi_replay(); } void smtc_soft_dump(void) @@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); } smtc_ipi_qdump(); - printk("Timer IPI Backlogs:\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); - } printk("%d Recoveries of \"stolen\" FPU\n", atomic_read(&smtc_fpu_recoveries)); } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 5fd0cd020af5..b602ac6eb47d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void) if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { cpumask_t tmask; - cpus_and(tmask, current->thread.user_cpus_allowed, - mt_fpu_cpumask); + current->thread.user_cpus_allowed + = current->cpus_allowed; + cpus_and(tmask, current->cpus_allowed, + mt_fpu_cpumask); set_cpus_allowed(current, tmask); set_thread_flag(TIF_FPUBOUND); } diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 3b7dd722c32a..cef2db8d2225 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o obj-$(CONFIG_PCI) += malta-pci.o # FIXME FIXME FIXME -obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o +obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 5ea705e49454..f84a46a8ae6e 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c @@ -84,12 +84,17 @@ static void msmtc_cpus_done(void) static void __init msmtc_smp_setup(void) { - mipsmt_build_cpu_map(0); + /* + * we won't get the definitive value until + * we've run smtc_prepare_cpus later, but + * we would appear to need an upper bound now. 
+ */ + smp_num_siblings = smtc_build_cpu_map(0); } static void __init msmtc_prepare_cpus(unsigned int max_cpus) { - mipsmt_prepare_cpus(); + smtc_prepare_cpus(max_cpus); } struct plat_smp_ops msmtc_smp_ops = { diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 15e01aec37fd..c8c32f417b6c 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o +obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o # # These are still pretty much in the old state, watch, go blind. diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c new file mode 100644 index 000000000000..bea9b6cdfdbf --- /dev/null +++ b/arch/mips/pci/pci-bcm47xx.c @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/ssb/ssb.h> + +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return 0; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + int res; + u8 slot, pin; + + res = ssb_pcibios_plat_dev_init(dev); + if (res < 0) { + printk(KERN_ALERT "PCI: Failed to init device %s\n", + pci_name(dev)); + return res; + } + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + slot = PCI_SLOT(dev->devfn); + res = ssb_pcibios_map_irq(dev, slot, pin); + + /* IRQ-0 and IRQ-1 are software interrupts. */ + if (res < 2) { + printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n", + pci_name(dev)); + return res; + } + + dev->irq = res; + return 0; +} + diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index bd78368c82bf..f97ab1461012 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c @@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) */ int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { + return 0; +} + +/* Most MIPS systems have straight-forward swizzling needs. */ +static inline u8 bridge_swizzle(u8 pin, u8 slot) +{ + return (((pin - 1) + slot) % 4) + 1; +} + +static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev) +{ + while (dev->bus->parent) { + /* Move up the chain of bridges. 
*/ + dev = dev->bus->self; + } + + return dev; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); - int irq = bc->pci_int[slot]; + struct pci_dev *rdev = bridge_root_dev(dev); + int slot = PCI_SLOT(rdev->devfn); + int irq; + irq = bc->pci_int[slot]; if (irq == -1) { - irq = bc->pci_int[slot] = request_bridge_irq(bc); + irq = request_bridge_irq(bc); if (irq < 0) - panic("Can't allocate interrupt for PCI device %s\n", - pci_name(dev)); + return irq; + + bc->pci_int[slot] = irq; } irq_to_bridge[irq] = bc; irq_to_slot[irq] = slot; - return irq; -} + dev->irq = irq; -/* Do platform specific device initialization at pci_enable_device() time */ -int pcibios_plat_dev_init(struct pci_dev *dev) -{ return 0; } diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 761c434a2488..56c64ccc9c21 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -20,22 +20,8 @@ EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); atomic_t irq_err_count; /* - * MN10300 INTC controller operations + * MN10300 interrupt controller operations */ -static void mn10300_cpupic_disable(unsigned int irq) -{ - u16 tmp = GxICR(irq); - GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; - tmp = GxICR(irq); -} - -static void mn10300_cpupic_enable(unsigned int irq) -{ - u16 tmp = GxICR(irq); - GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; - tmp = GxICR(irq); -} - static void mn10300_cpupic_ack(unsigned int irq) { u16 tmp; @@ -60,26 +46,54 @@ static void mn10300_cpupic_mask_ack(unsigned int irq) static void mn10300_cpupic_unmask(unsigned int irq) { u16 tmp = GxICR(irq); - GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; + GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; tmp = GxICR(irq); } -static void mn10300_cpupic_end(unsigned int irq) +static void mn10300_cpupic_unmask_clear(unsigned int irq) { + /* the MN10300 PIC latches its interrupt request bit, even after the + * device has ceased to assert its interrupt line and the interrupt + * channel has been disabled in the PIC, so for level-triggered + * interrupts we need to clear the request bit when we re-enable */ u16 tmp = GxICR(irq); - GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; + GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; tmp = GxICR(irq); } -static struct irq_chip mn10300_cpu_pic = { - .name = "cpu", - .disable = mn10300_cpupic_disable, - .enable = mn10300_cpupic_enable, +/* + * MN10300 PIC level-triggered IRQ handling. + * + * The PIC has no 'ACK' function per se. It is possible to clear individual + * channel latches, but each latch relatches whether or not the channel is + * masked, so we need to clear the latch when we unmask the channel. + * + * Also for this reason, we don't supply an ack() op (it's unused anyway if + * mask_ack() is provided), and mask_ack() just masks. + */ +static struct irq_chip mn10300_cpu_pic_level = { + .name = "cpu_l", + .disable = mn10300_cpupic_mask, + .enable = mn10300_cpupic_unmask_clear, + .ack = NULL, + .mask = mn10300_cpupic_mask, + .mask_ack = mn10300_cpupic_mask, + .unmask = mn10300_cpupic_unmask_clear, +}; + +/* + * MN10300 PIC edge-triggered IRQ handling. + * + * We use the latch clearing function of the PIC as the 'ACK' function. 
+ */ +static struct irq_chip mn10300_cpu_pic_edge = { + .name = "cpu_e", + .disable = mn10300_cpupic_mask, + .enable = mn10300_cpupic_unmask, .ack = mn10300_cpupic_ack, .mask = mn10300_cpupic_mask, .mask_ack = mn10300_cpupic_mask_ack, .unmask = mn10300_cpupic_unmask, - .end = mn10300_cpupic_end, }; /* @@ -114,7 +128,8 @@ void set_intr_level(int irq, u16 level) */ void set_intr_postackable(int irq) { - set_irq_handler(irq, handle_level_irq); + set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, + handle_level_irq); } /* @@ -126,8 +141,12 @@ void __init init_IRQ(void) for (irq = 0; irq < NR_IRQS; irq++) if (irq_desc[irq].chip == &no_irq_type) - set_irq_chip_and_handler(irq, &mn10300_cpu_pic, - handle_edge_irq); + /* due to the PIC latching interrupt requests, even + * when the IRQ is disabled, IRQ_PENDING is superfluous + * and we can use handle_level_irq() for edge-triggered + * interrupts */ + set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, + handle_level_irq); unit_init_IRQ(); } diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index babb7c2ac377..e4606586f94c 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c @@ -1,6 +1,6 @@ /* MN10300 Low level time management * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2007-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from arch/i386/kernel/time.c * @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/smp.h> #include <linux/profile.h> +#include <linux/cnt32_to_63.h> #include <asm/irq.h> #include <asm/div64.h> #include <asm/processor.h> @@ -40,27 +41,54 @@ static struct irqaction timer_irq = { .name = "timer", }; +static unsigned long sched_clock_multiplier; + /* * scheduler clock - returns current time in nanosec units. */ unsigned long long sched_clock(void) { union { - unsigned long long l; - u32 w[2]; - } quot; + unsigned long long ll; + unsigned l[2]; + } tsc64, result; + unsigned long tsc, tmp; + unsigned product[3]; /* 96-bit intermediate value */ + + /* read the TSC value + */ + tsc = 0 - get_cycles(); /* get_cycles() counts down */ - quot.w[0] = mn10300_last_tsc - get_cycles(); - quot.w[1] = 1000000000; + /* expand to 64-bits. 
+ * - sched_clock() must be called once a minute or better or the + * following will go horribly wrong - see cnt32_to_63() + */ + tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL; - asm("mulu %2,%3,%0,%1" - : "=r"(quot.w[1]), "=r"(quot.w[0]) - : "0"(quot.w[1]), "1"(quot.w[0]) + /* scale the 64-bit TSC value to a nanosecond value via a 96-bit + * intermediate + */ + asm("mulu %2,%0,%3,%0 \n" /* LSW * mult -> 0:%3:%0 */ + "mulu %2,%1,%2,%1 \n" /* MSW * mult -> %2:%1:0 */ + "add %3,%1 \n" + "addc 0,%2 \n" /* result in %2:%1:%0 */ + : "=r"(product[0]), "=r"(product[1]), "=r"(product[2]), "=r"(tmp) + : "0"(tsc64.l[0]), "1"(tsc64.l[1]), "2"(sched_clock_multiplier) : "cc"); - do_div(quot.l, MN10300_TSCCLK); + result.l[0] = product[1] << 16 | product[0] >> 16; + result.l[1] = product[2] << 16 | product[1] >> 16; - return quot.l; + return result.ll; +} + +/* + * initialise the scheduler clock + */ +static void __init mn10300_sched_clock_init(void) +{ + sched_clock_multiplier = + __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK); } /* @@ -128,4 +156,6 @@ void __init time_init(void) /* start the watchdog timer */ watchdog_go(); #endif + + mn10300_sched_clock_init(); } diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c index 14b2c817cff8..70e8cb4ea266 100644 --- a/arch/mn10300/unit-asb2303/unit-init.c +++ b/arch/mn10300/unit-asb2303/unit-init.c @@ -51,7 +51,7 @@ void __init unit_init_IRQ(void) switch (GET_XIRQ_TRIGGER(extnum)) { case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: - set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); + set_intr_postackable(XIRQ2IRQ(extnum)); break; default: break; diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c index 6a352414a358..72812a9439ac 100644 --- a/arch/mn10300/unit-asb2305/unit-init.c +++ b/arch/mn10300/unit-asb2305/unit-init.c @@ -52,7 +52,7 @@ void __init unit_init_IRQ(void) switch (GET_XIRQ_TRIGGER(extnum)) { case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: - set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); + set_intr_postackable(XIRQ2IRQ(extnum)); break; default: break; diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts index f87fe7b9ced9..c6e11ebecebb 100644 --- a/arch/powerpc/boot/dts/holly.dts +++ b/arch/powerpc/boot/dts/holly.dts @@ -133,61 +133,61 @@ reg = <0x00007400 0x00000400>; big-endian; }; + }; - pci@1000 { - device_type = "pci"; - compatible = "tsi109-pci", "tsi108-pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0x00001000 0x00001000>; - bus-range = <0x0 0x0>; - /*----------------------------------------------------+ - | PCI memory range. - | 01 denotes I/O space - | 02 denotes 32-bit memory space - +----------------------------------------------------*/ - ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000 - 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>; - clock-frequency = <133333332>; - interrupt-parent = <&MPIC>; + pci@c0001000 { + device_type = "pci"; + compatible = "tsi109-pci", "tsi108-pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xc0001000 0x00001000>; + bus-range = <0x0 0x0>; + /*----------------------------------------------------+ + | PCI memory range. 
+ | 01 denotes I/O space + | 02 denotes 32-bit memory space + +----------------------------------------------------*/ + ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000 + 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>; + clock-frequency = <133333332>; + interrupt-parent = <&MPIC>; + interrupts = <0x17 0x2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + /*----------------------------------------------------+ + | The INTA, INTB, INTC, INTD are shared. + +----------------------------------------------------*/ + interrupt-map = < + 0x800 0x0 0x0 0x1 &RT0 0x24 0x0 + 0x800 0x0 0x0 0x2 &RT0 0x25 0x0 + 0x800 0x0 0x0 0x3 &RT0 0x26 0x0 + 0x800 0x0 0x0 0x4 &RT0 0x27 0x0 + + 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0 + 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0 + 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0 + 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0 + + 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0 + 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0 + 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0 + 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0 + + 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0 + 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0 + 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0 + 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0 + >; + + RT0: router@1180 { + device_type = "pic-router"; + interrupt-controller; + big-endian; + clock-frequency = <0>; + #address-cells = <0>; + #interrupt-cells = <2>; interrupts = <0x17 0x2>; - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; - /*----------------------------------------------------+ - | The INTA, INTB, INTC, INTD are shared. - +----------------------------------------------------*/ - interrupt-map = < - 0x800 0x0 0x0 0x1 &RT0 0x24 0x0 - 0x800 0x0 0x0 0x2 &RT0 0x25 0x0 - 0x800 0x0 0x0 0x3 &RT0 0x26 0x0 - 0x800 0x0 0x0 0x4 &RT0 0x27 0x0 - - 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0 - 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0 - 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0 - 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0 - - 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0 - 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0 - 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0 - 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0 - - 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0 - 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0 - 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0 - 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0 - >; - - RT0: router@1180 { - device_type = "pic-router"; - interrupt-controller; - big-endian; - clock-frequency = <0>; - #address-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x17 0x2>; - interrupt-parent = <&MPIC>; - }; + interrupt-parent = <&MPIC>; }; }; diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index d308a9f70f1b..31982d05d81a 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -34,11 +34,7 @@ #include <asm/smp.h> #ifdef CONFIG_HOTPLUG_CPU -/* this is used for software suspend, and that shuts down - * CPUs even while the system is still booting... 
*/ -#define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \ - (system_state == SYSTEM_RUNNING \ - || system_state == SYSTEM_BOOTING)) +#define cpu_should_die() cpu_is_offline(smp_processor_id()) #else #define cpu_should_die() 0 #endif diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index b4fdf2f2743c..fe8f71dd0b3f 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -347,9 +347,8 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, linux_regs->msr |= MSR_SE; #endif kgdb_single_step = 1; - if (kgdb_contthread) - atomic_set(&kgdb_cpu_doing_single_step, - raw_smp_processor_id()); + atomic_set(&kgdb_cpu_doing_single_step, + raw_smp_processor_id()); } return 0; } diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index ef74a0763ec1..8c619963becc 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c @@ -219,11 +219,21 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev) int i; u8 *dummy; struct pci_bus *bus = dev->bus; + resource_size_t end = 0; + + for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) { + unsigned long flags = pci_resource_flags(dev, i); + if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM) + end = pci_resource_end(dev, i); + } for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { if ((bus->resource[i]) && (bus->resource[i]->flags & IORESOURCE_MEM)) { - dummy = ioremap(bus->resource[i]->end - 3, 0x4); + if (bus->resource[i]->end == end) + dummy = ioremap(bus->resource[i]->start, 0x4); + else + dummy = ioremap(bus->resource[i]->end - 3, 0x4); if (dummy) { in_8(dummy); iounmap(dummy); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ca114fe46ffb..06acb1a18bbc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -169,6 +169,8 @@ void init_cpu_timer(void) static void clock_comparator_interrupt(__u16 code) { + if (S390_lowcore.clock_comparator == -1ULL) + set_clock_comparator(S390_lowcore.clock_comparator); } static void etr_timing_alert(struct etr_irq_parm *); diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index fc6ab6094df8..0953cee05efc 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -1,14 +1,9 @@ /* - * arch/s390/lib/delay.c * Precise Delay Loops for S390 * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * - * Derived from "arch/i386/lib/delay.c" - * Copyright (C) 1993 Linus Torvalds - * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> + * Copyright IBM Corp. 1999,2008 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, */ #include <linux/sched.h> @@ -29,30 +24,31 @@ void __delay(unsigned long loops) asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); } -/* - * Waits for 'usecs' microseconds using the TOD clock comparator. 
- */ -void __udelay(unsigned long usecs) +static void __udelay_disabled(unsigned long usecs) { - u64 end, time, old_cc = 0; - unsigned long flags, cr0, mask, dummy; - int irq_context; + unsigned long mask, cr0, cr0_saved; + u64 clock_saved; - irq_context = in_interrupt(); - if (!irq_context) - local_bh_disable(); - local_irq_save(flags); - if (raw_irqs_disabled_flags(flags)) { - old_cc = local_tick_disable(); - S390_lowcore.clock_comparator = -1ULL; - __ctl_store(cr0, 0, 0); - dummy = (cr0 & 0xffff00e0) | 0x00000800; - __ctl_load(dummy , 0, 0); - mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; - } else - mask = psw_kernel_bits | PSW_MASK_WAIT | - PSW_MASK_EXT | PSW_MASK_IO; + clock_saved = local_tick_disable(); + set_clock_comparator(get_clock() + ((u64) usecs << 12)); + __ctl_store(cr0_saved, 0, 0); + cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; + __ctl_load(cr0 , 0, 0); + mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; + trace_hardirqs_on(); + __load_psw_mask(mask); + local_irq_disable(); + __ctl_load(cr0_saved, 0, 0); + local_tick_enable(clock_saved); + set_clock_comparator(S390_lowcore.clock_comparator); +} +static void __udelay_enabled(unsigned long usecs) +{ + unsigned long mask; + u64 end, time; + + mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO; end = get_clock() + ((u64) usecs << 12); do { time = end < S390_lowcore.clock_comparator ? @@ -62,13 +58,37 @@ void __udelay(unsigned long usecs) __load_psw_mask(mask); local_irq_disable(); } while (get_clock() < end); + set_clock_comparator(S390_lowcore.clock_comparator); +} - if (raw_irqs_disabled_flags(flags)) { - __ctl_load(cr0, 0, 0); - local_tick_enable(old_cc); +/* + * Waits for 'usecs' microseconds using the TOD clock comparator. + */ +void __udelay(unsigned long usecs) +{ + unsigned long flags; + + preempt_disable(); + local_irq_save(flags); + if (in_irq()) { + __udelay_disabled(usecs); + goto out; + } + if (in_softirq()) { + if (raw_irqs_disabled_flags(flags)) + __udelay_disabled(usecs); + else + __udelay_enabled(usecs); + goto out; } - if (!irq_context) + if (raw_irqs_disabled_flags(flags)) { + local_bh_disable(); + __udelay_disabled(usecs); _local_bh_enable(); - set_clock_comparator(S390_lowcore.clock_comparator); + goto out; + } + __udelay_enabled(usecs); +out: local_irq_restore(flags); + preempt_enable(); } diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index f845f150f565..100ebd527499 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c @@ -169,7 +169,7 @@ static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long fla static int of_bus_pci_match(struct device_node *np) { - if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { + if (!strcmp(np->name, "pci")) { const char *model = of_get_property(np, "model", NULL); if (model && !strcmp(model, "SUNW,simba")) @@ -200,7 +200,7 @@ static int of_bus_simba_match(struct device_node *np) /* Treat PCI busses lacking ranges property just like * simba. */ - if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { + if (!strcmp(np->name, "pci")) { if (!of_find_property(np, "ranges", NULL)) return 1; } @@ -429,7 +429,7 @@ static int __init use_1to1_mapping(struct device_node *pp) * it lacks a ranges property, and this will include * cases like Simba. 
*/ - if (!strcmp(pp->type, "pci") || !strcmp(pp->type, "pciex")) + if (!strcmp(pp->name, "pci")) return 0; return 1; @@ -714,8 +714,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op, break; } } else { - if (!strcmp(pp->type, "pci") || - !strcmp(pp->type, "pciex")) { + if (!strcmp(pp->name, "pci")) { unsigned int this_orig_irq = irq; irq = pci_irq_swizzle(dp, pp, irq); diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 55096195458f..80dad76f8b81 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -425,7 +425,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->current_state = 4; /* unknown power state */ dev->error_state = pci_channel_io_normal; - if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { + if (!strcmp(node->name, "pci")) { /* a PCI-PCI bridge */ dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; dev->rom_base_reg = PCI_ROM_ADDRESS1; diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index a1310c52fc0c..857e492c571e 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) continue; } sh_symtab = sec_symtab->symtab; - sym_strtab = sec->link->strtab; + sym_strtab = sec_symtab->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { Elf32_Rel *rel; Elf32_Sym *sym; diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 69b4d060b21c..042fdc27bc92 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -101,10 +101,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) */ static int iommu_completion_wait(struct amd_iommu *iommu) { - int ret, ready = 0; + int ret = 0, ready = 0; unsigned status = 0; struct iommu_cmd cmd; - unsigned long i = 0; + unsigned long flags, i = 0; memset(&cmd, 0, sizeof(cmd)); cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; @@ -112,10 +112,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu) iommu->need_sync = 0; - ret = iommu_queue_command(iommu, &cmd); + spin_lock_irqsave(&iommu->lock, flags); + + ret = __iommu_queue_command(iommu, &cmd); if (ret) - return ret; + goto out; while (!ready && (i < EXIT_LOOP_COUNT)) { ++i; @@ -130,6 +132,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu) if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); +out: + spin_unlock_irqrestore(&iommu->lock, flags); return 0; } @@ -140,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) { struct iommu_cmd cmd; + int ret; BUG_ON(iommu == NULL); @@ -147,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); cmd.data[0] = devid; + ret = iommu_queue_command(iommu, &cmd); + iommu->need_sync = 1; - return iommu_queue_command(iommu, &cmd); + return ret; } /* @@ -159,6 +166,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, u64 address, u16 domid, int pde, int s) { struct iommu_cmd cmd; + int ret; memset(&cmd, 0, sizeof(cmd)); address &= PAGE_MASK; @@ -171,9 +179,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; + ret = iommu_queue_command(iommu, &cmd); + iommu->need_sync = 1; - return 
iommu_queue_command(iommu, &cmd); + return ret; } /* diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 9ee24e6bc4b0..732d1f4e10ee 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -234,6 +234,7 @@ #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/i8253.h> +#include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> @@ -2217,7 +2218,7 @@ static int __init apm_init(void) dmi_check_system(apm_dmi_table); - if (apm_info.bios.version == 0 || paravirt_enabled()) { + if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) { printk(KERN_INFO "apm: BIOS not found.\n"); return -ENODEV; } diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index b117d7f8a564..885c8265e6b5 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -834,7 +834,7 @@ static int __init enable_mtrr_cleanup_setup(char *str) enable_mtrr_cleanup = 1; return 0; } -early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup); +early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); struct var_mtrr_state { unsigned long range_startk; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index f47f0eb886b8..8282a2139681 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -69,6 +69,9 @@ static int gdb_x86vector = -1; */ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { +#ifndef CONFIG_X86_32 + u32 *gdb_regs32 = (u32 *)gdb_regs; +#endif gdb_regs[GDB_AX] = regs->ax; gdb_regs[GDB_BX] = regs->bx; gdb_regs[GDB_CX] = regs->cx; @@ -76,9 +79,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) gdb_regs[GDB_SI] = regs->si; gdb_regs[GDB_DI] = regs->di; gdb_regs[GDB_BP] = regs->bp; - gdb_regs[GDB_PS] = regs->flags; gdb_regs[GDB_PC] = regs->ip; #ifdef CONFIG_X86_32 + gdb_regs[GDB_PS] = regs->flags; gdb_regs[GDB_DS] = regs->ds; gdb_regs[GDB_ES] = regs->es; gdb_regs[GDB_CS] = regs->cs; @@ -94,6 +97,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) gdb_regs[GDB_R13] = regs->r13; gdb_regs[GDB_R14] = regs->r14; gdb_regs[GDB_R15] = regs->r15; + gdb_regs32[GDB_PS] = regs->flags; + gdb_regs32[GDB_CS] = regs->cs; + gdb_regs32[GDB_SS] = regs->ss; #endif gdb_regs[GDB_SP] = regs->sp; } @@ -112,6 +118,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) */ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { +#ifndef CONFIG_X86_32 + u32 *gdb_regs32 = (u32 *)gdb_regs; +#endif gdb_regs[GDB_AX] = 0; gdb_regs[GDB_BX] = 0; gdb_regs[GDB_CX] = 0; @@ -129,8 +138,10 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) gdb_regs[GDB_FS] = 0xFFFF; gdb_regs[GDB_GS] = 0xFFFF; #else - gdb_regs[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); - gdb_regs[GDB_PC] = 0; + gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); + gdb_regs32[GDB_CS] = __KERNEL_CS; + gdb_regs32[GDB_SS] = __KERNEL_DS; + gdb_regs[GDB_PC] = p->thread.ip; gdb_regs[GDB_R8] = 0; gdb_regs[GDB_R9] = 0; gdb_regs[GDB_R10] = 0; @@ -153,6 +164,9 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) */ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) { +#ifndef CONFIG_X86_32 + u32 *gdb_regs32 = (u32 *)gdb_regs; +#endif regs->ax = gdb_regs[GDB_AX]; regs->bx = gdb_regs[GDB_BX]; regs->cx = gdb_regs[GDB_CX]; @@ -160,9 +174,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) regs->si = gdb_regs[GDB_SI]; 
regs->di = gdb_regs[GDB_DI]; regs->bp = gdb_regs[GDB_BP]; - regs->flags = gdb_regs[GDB_PS]; regs->ip = gdb_regs[GDB_PC]; #ifdef CONFIG_X86_32 + regs->flags = gdb_regs[GDB_PS]; regs->ds = gdb_regs[GDB_DS]; regs->es = gdb_regs[GDB_ES]; regs->cs = gdb_regs[GDB_CS]; @@ -175,6 +189,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) regs->r13 = gdb_regs[GDB_R13]; regs->r14 = gdb_regs[GDB_R14]; regs->r15 = gdb_regs[GDB_R15]; + regs->flags = gdb_regs32[GDB_PS]; + regs->cs = gdb_regs32[GDB_CS]; + regs->ss = gdb_regs32[GDB_SS]; #endif } @@ -378,10 +395,8 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; kgdb_single_step = 1; - if (kgdb_contthread) { - atomic_set(&kgdb_cpu_doing_single_step, - raw_smp_processor_id()); - } + atomic_set(&kgdb_cpu_doing_single_step, + raw_smp_processor_id()); } get_debugreg(dr6, 6); @@ -466,9 +481,15 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) case DIE_DEBUG: if (atomic_read(&kgdb_cpu_doing_single_step) == - raw_smp_processor_id() && - user_mode(regs)) - return single_step_cont(regs, args); + raw_smp_processor_id()) { + if (user_mode(regs)) + return single_step_cont(regs, args); + break; + } else if (test_thread_flag(TIF_SINGLESTEP)) + /* This means a user thread is single stepping + * a system call which should be ignored + */ + return NOTIFY_DONE; /* fall through */ default: if (user_mode(regs)) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7fc4d5b0a6a0..876e91890777 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -246,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) return 1; } +static cpumask_t c1e_mask = CPU_MASK_NONE; +static int c1e_detected; + +void c1e_remove_cpu(int cpu) +{ + cpu_clear(cpu, c1e_mask); +} + /* * C1E aware idle routine. We check for C1E active in the interrupt * pending message MSR. If we detect C1E, then we handle it the same @@ -253,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) */ static void c1e_idle(void) { - static cpumask_t c1e_mask = CPU_MASK_NONE; - static int c1e_detected; - if (need_resched()) return; @@ -265,8 +270,10 @@ static void c1e_idle(void) rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); if (lo & K8_INTP_C1E_ACTIVE_MASK) { c1e_detected = 1; - mark_tsc_unstable("TSC halt in C1E"); - printk(KERN_INFO "System has C1E enabled\n"); + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + mark_tsc_unstable("TSC halt in AMD C1E"); + printk(KERN_INFO "System has AMD C1E enabled\n"); + set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E); } } diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 3b7a1ddcc0bc..31f40b24bf5d 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -55,6 +55,7 @@ #include <asm/tlbflush.h> #include <asm/cpu.h> #include <asm/kdebug.h> +#include <asm/idle.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -88,6 +89,7 @@ static void cpu_exit_clear(void) cpu_clear(cpu, cpu_callin_map); numa_remove_cpu(cpu); + c1e_remove_cpu(cpu); } /* We don't actually take CPU down, just spin without interrupts. 
*/ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71553b664e2a..e12e0e4dd256 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -93,6 +93,8 @@ DECLARE_PER_CPU(int, cpu_state); static inline void play_dead(void) { idle_task_exit(); + c1e_remove_cpu(raw_smp_processor_id()); + mb(); /* Ack it */ __get_cpu_var(cpu_state) = CPU_DEAD; diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 6ca515d6db54..edfb09f30479 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry, const void *desc) { u32 *ldt_entry = (u32 *)desc; - vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); + vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); } static void vmi_load_sp0(struct tss_struct *tss, diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 0c029e8959c7..7766d36983fc 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -61,7 +61,7 @@ static void vsmp_irq_enable(void) native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); } -static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, +static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, unsigned long addr, unsigned len) { switch (type) { diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 0227694f7dab..8a5f1614a3d5 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -295,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy) static void nmi_shutdown(void) { - struct op_msrs *msrs = &get_cpu_var(cpu_msrs); + struct op_msrs *msrs; + nmi_enabled = 0; on_each_cpu(nmi_cpu_shutdown, NULL, 1); unregister_die_notifier(&profile_exceptions_nb); + msrs = &get_cpu_var(cpu_msrs); model->shutdown(msrs); free_msrs(); put_cpu_var(cpu_msrs); diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index 0a5f6b2114c5..d672cfe7ca59 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c @@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index, console->flags |= CON_ENABLED; console->index = index; braille_co = console; + register_keyboard_notifier(&keyboard_notifier_block); + register_vt_notifier(&vt_notifier_block); return 0; } @@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console) { if (braille_co != console) return -EINVAL; + unregister_keyboard_notifier(&keyboard_notifier_block); + unregister_vt_notifier(&vt_notifier_block); braille_co = NULL; return 0; } - -static int __init braille_init(void) -{ - register_keyboard_notifier(&keyboard_notifier_block); - register_vt_notifier(&vt_notifier_block); - return 0; -} - -console_initcall(braille_init); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 084109507c9f..8dd3336efd7e 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) "firmware_node"); ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, "physical_node"); - if (acpi_dev->wakeup.flags.valid) + if (acpi_dev->wakeup.flags.valid) { device_set_wakeup_capable(dev, true); + device_set_wakeup_enable(dev, + acpi_dev->wakeup.state.enabled); + } } return 0; diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 4ebbba2b6b19..bf5b04de02d1 100644 --- 
a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c @@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) return 0; } +static void physical_device_enable_wakeup(struct acpi_device *adev) +{ + struct device *dev = acpi_get_physical_device(adev->handle); + + if (dev && device_can_wakeup(dev)) + device_set_wakeup_enable(dev, adev->wakeup.state.enabled); +} + static ssize_t acpi_system_write_wakeup_device(struct file *file, const char __user * buffer, @@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file, } } if (found_dev) { + physical_device_enable_wakeup(found_dev); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct @@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file, dev->pnp.bus_id, found_dev->pnp.bus_id); dev->wakeup.state.enabled = found_dev->wakeup.state.enabled; + physical_device_enable_wakeup(dev); } } } diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 1e1f3f3757ae..14601dc05e41 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -309,6 +309,8 @@ static void nv_nf2_freeze(struct ata_port *ap); static void nv_nf2_thaw(struct ata_port *ap); static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); +static int nv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); static int nv_adma_slave_config(struct scsi_device *sdev); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); static void nv_adma_qc_prep(struct ata_queued_cmd *qc); @@ -403,28 +405,45 @@ static struct scsi_host_template nv_swncq_sht = { .slave_configure = nv_swncq_slave_config, }; -static struct ata_port_operations nv_generic_ops = { +/* OSDL bz3352 reports that some nv controllers can't determine device + * signature reliably and nv_hardreset is implemented to work around + * the problem. This was reported on nf3 and it's unclear whether any + * other controllers are affected. However, the workaround has been + * applied to all variants and there isn't much to gain by trying to + * find out exactly which ones are affected at this point especially + * because NV has moved over to ahci for newer controllers. + */ +static struct ata_port_operations nv_common_ops = { .inherits = &ata_bmdma_port_ops, - .hardreset = ATA_OP_NULL, + .hardreset = nv_hardreset, .scr_read = nv_scr_read, .scr_write = nv_scr_write, }; +/* OSDL bz11195 reports that link doesn't come online after hardreset + * on generic nv's and there have been several other similar reports + * on linux-ide. Disable hardreset for generic nv's. 
+ */ +static struct ata_port_operations nv_generic_ops = { + .inherits = &nv_common_ops, + .hardreset = ATA_OP_NULL, +}; + static struct ata_port_operations nv_nf2_ops = { - .inherits = &nv_generic_ops, + .inherits = &nv_common_ops, .freeze = nv_nf2_freeze, .thaw = nv_nf2_thaw, }; static struct ata_port_operations nv_ck804_ops = { - .inherits = &nv_generic_ops, + .inherits = &nv_common_ops, .freeze = nv_ck804_freeze, .thaw = nv_ck804_thaw, .host_stop = nv_ck804_host_stop, }; static struct ata_port_operations nv_adma_ops = { - .inherits = &nv_generic_ops, + .inherits = &nv_common_ops, .check_atapi_dma = nv_adma_check_atapi_dma, .sff_tf_read = nv_adma_tf_read, @@ -448,7 +467,7 @@ static struct ata_port_operations nv_adma_ops = { }; static struct ata_port_operations nv_swncq_ops = { - .inherits = &nv_generic_ops, + .inherits = &nv_common_ops, .qc_defer = ata_std_qc_defer, .qc_prep = nv_swncq_qc_prep, @@ -1586,6 +1605,21 @@ static void nv_mcp55_thaw(struct ata_port *ap) ata_sff_thaw(ap); } +static int nv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + /* SATA hardreset fails to retrieve proper device signature on + * some controllers. Request follow up SRST. For more info, + * see http://bugzilla.kernel.org/show_bug.cgi?id=3352 + */ + rc = sata_sff_hardreset(link, class, deadline); + if (rc) + return rc; + return -EAGAIN; +} + static void nv_adma_error_handler(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 6a010681ecf3..29ae99817c60 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -104,6 +104,9 @@ static struct usb_device_id blacklist_table[] = { /* Broadcom BCM2046 */ { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, + /* Apple MacBook Pro with Broadcom chip */ + { USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET }, + /* IBM/Lenovo ThinkPad with Broadcom chip */ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, @@ -169,6 +172,7 @@ static struct usb_device_id blacklist_table[] = { struct btusb_data { struct hci_dev *hdev; struct usb_device *udev; + struct usb_interface *intf; struct usb_interface *isoc; spinlock_t lock; @@ -516,7 +520,7 @@ static int btusb_open(struct hci_dev *hdev) err = btusb_submit_intr_urb(hdev); if (err < 0) { - clear_bit(BTUSB_INTR_RUNNING, &hdev->flags); + clear_bit(BTUSB_INTR_RUNNING, &data->flags); clear_bit(HCI_RUNNING, &hdev->flags); } @@ -532,8 +536,10 @@ static int btusb_close(struct hci_dev *hdev) if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; + cancel_work_sync(&data->work); + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); - usb_kill_anchored_urbs(&data->intr_anchor); + usb_kill_anchored_urbs(&data->isoc_anchor); clear_bit(BTUSB_BULK_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->bulk_anchor); @@ -821,6 +827,7 @@ static int btusb_probe(struct usb_interface *intf, } data->udev = interface_to_usbdev(intf); + data->intf = intf; spin_lock_init(&data->lock); @@ -889,7 +896,7 @@ static int btusb_probe(struct usb_interface *intf, if (data->isoc) { err = usb_driver_claim_interface(&btusb_driver, - data->isoc, NULL); + data->isoc, data); if (err < 0) { hci_free_dev(hdev); kfree(data); @@ -921,13 +928,22 @@ static void btusb_disconnect(struct usb_interface *intf) hdev = data->hdev; - if (data->isoc) - usb_driver_release_interface(&btusb_driver, 
data->isoc); + __hci_dev_hold(hdev); - usb_set_intfdata(intf, NULL); + usb_set_intfdata(data->intf, NULL); + + if (data->isoc) + usb_set_intfdata(data->isoc, NULL); hci_unregister_dev(hdev); + if (intf == data->isoc) + usb_driver_release_interface(&btusb_driver, data->intf); + else if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + + __hci_dev_put(hdev); + hci_free_dev(hdev); } diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index daeb8f766971..e4dce8709541 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -695,13 +695,23 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) { struct tty_driver *p, *res = NULL; int tty_line = 0; + int len; char *str; + for (str = name; *str; str++) + if ((*str >= '0' && *str <= '9') || *str == ',') + break; + if (!*str) + return NULL; + + len = str - name; + tty_line = simple_strtoul(str, &str, 10); + mutex_lock(&tty_mutex); /* Search through the tty devices to look for a match */ list_for_each_entry(p, &tty_drivers, tty_drivers) { - str = name + strlen(p->name); - tty_line = simple_strtoul(str, &str, 10); + if (strncmp(name, p->name, len) != 0) + continue; if (*str == ',') str++; if (*str == '\0') diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 94df91771243..0778d99aea7c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data) int i; status_block = dma_readl(dw, RAW.BLOCK); - status_xfer = dma_readl(dw, RAW.BLOCK); + status_xfer = dma_readl(dw, RAW.XFER); status_err = dma_readl(dw, RAW.ERROR); dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 22f6d5c00d80..0e7b1c6724aa 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -180,7 +180,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = { }; -static int i2c_powermac_remove(struct platform_device *dev) +static int __devexit i2c_powermac_remove(struct platform_device *dev) { struct i2c_adapter *adapter = platform_get_drvdata(dev); struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter); @@ -200,7 +200,7 @@ static int i2c_powermac_remove(struct platform_device *dev) } -static int __devexit i2c_powermac_probe(struct platform_device *dev) +static int __devinit i2c_powermac_probe(struct platform_device *dev) { struct pmac_i2c_bus *bus = dev->dev.platform_data; struct device_node *parent = NULL; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index af4491fa7e34..307d976c9b69 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -583,8 +583,10 @@ static int __init i2c_dev_init(void) goto out; i2c_dev_class = class_create(THIS_MODULE, "i2c-dev"); - if (IS_ERR(i2c_dev_class)) + if (IS_ERR(i2c_dev_class)) { + res = PTR_ERR(i2c_dev_class); goto out_unreg_chrdev; + } res = i2c_add_driver(&i2cdev_driver); if (res) diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index fc735ab08ff4..8e93a797c93d 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -292,6 +292,20 @@ config IDE_GENERIC tristate "generic/default IDE chipset support" depends on ALPHA || X86 || IA64 || M32R || MIPS help + This is the generic IDE driver. This driver attaches to the + fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and + so on). 
Please note that if this driver is built into the + kernel or loaded before other ATA (IDE or libata) drivers + and the controller is located at legacy ports, this driver + may grab those ports and thus can prevent the controller + specific driver from attaching. + + Also, currently, IDE generic doesn't allow IRQ sharing + meaning that the IRQs it grabs won't be available to other + controllers sharing those IRQs which usually makes drivers + for those controllers fail. Generally, it's not a good idea + to load IDE generic driver on modern systems. + If unsure, say N. config BLK_DEV_PLATFORM diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 1bce84b56630..3833189144ed 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -2338,7 +2338,7 @@ static void idetape_get_inquiry_results(ide_drive_t *drive) { idetape_tape_t *tape = drive->driver_data; struct ide_atapi_pc pc; - char fw_rev[6], vendor_id[10], product_id[18]; + char fw_rev[4], vendor_id[8], product_id[16]; idetape_create_inquiry_cmd(&pc); if (idetape_queue_pc_tail(drive, &pc)) { @@ -2350,11 +2350,11 @@ static void idetape_get_inquiry_results(ide_drive_t *drive) memcpy(product_id, &pc.buf[16], 16); memcpy(fw_rev, &pc.buf[32], 4); - ide_fixstring(vendor_id, 10, 0); - ide_fixstring(product_id, 18, 0); - ide_fixstring(fw_rev, 6, 0); + ide_fixstring(vendor_id, 8, 0); + ide_fixstring(product_id, 16, 0); + ide_fixstring(fw_rev, 4, 0); - printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n", + printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n", drive->name, tape->name, vendor_id, product_id, fw_rev); } diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index badf79fc9e3a..39c9ee995857 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c @@ -107,6 +107,7 @@ static int __devinit swarm_ide_probe(struct device *dev) base = ioremap(offset, size); + memset(&hw, 0, sizeof(hw)); for (i = 0; i <= 7; i++) hw.io_ports_array[i] = (unsigned long)(base + ((0x1f0 + i) << 5)); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1b1df5cc4113..e9ca3cb57d52 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -404,7 +404,7 @@ static void path_rec_completion(int status, struct net_device *dev = path->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_ah *ah = NULL; - struct ipoib_ah *old_ah; + struct ipoib_ah *old_ah = NULL; struct ipoib_neigh *neigh, *tn; struct sk_buff_head skqueue; struct sk_buff *skb; @@ -428,12 +428,12 @@ static void path_rec_completion(int status, spin_lock_irqsave(&priv->lock, flags); - old_ah = path->ah; - path->ah = ah; - if (ah) { path->pathrec = *pathrec; + old_ah = path->ah; + path->ah = ah; + ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", ah, be16_to_cpu(pathrec->dlid), pathrec->sl); diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 18f4d7f6ce6d..2998a6ac9ae4 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -351,8 +351,9 @@ static int report_tp_state(struct bcm5974 *dev, int size) #define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 #define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 #define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 +#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08 -static int bcm5974_wellspring_mode(struct bcm5974 *dev) +static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) { char *data = kmalloc(8, GFP_KERNEL); int retval = 0, size; 
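The ide-tape hunk just above trims the inquiry buffers to their exact on-wire sizes and prints them with precision specifiers, so the strings no longer need to be NUL-terminated. A minimal sketch of the same idea, assuming a raw INQUIRY buffer; the helper name below is illustrative and not part of the patch:

#include <linux/ide.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * Sketch only: copy a fixed-width, non-NUL-terminated INQUIRY field and
 * print it. The %.8s precision caps the read at 8 bytes, so the buffer
 * needs no terminator. Buffer layout follows the standard INQUIRY data.
 */
static void show_vendor_field(const char *inquiry_buf)
{
	char vendor_id[8];			/* exact field width, no room for a NUL */

	memcpy(vendor_id, &inquiry_buf[8], 8);	/* bytes 8..15 of the INQUIRY data */
	ide_fixstring(vendor_id, 8, 0);		/* trim padding blanks in place */
	printk(KERN_INFO "vendor: %.8s\n", vendor_id);
}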
@@ -377,7 +378,9 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev) } /* apply the mode switch */ - data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE; + data[0] = on ? + BCM5974_WELLSPRING_MODE_VENDOR_VALUE : + BCM5974_WELLSPRING_MODE_NORMAL_VALUE; /* write configuration */ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), @@ -392,7 +395,8 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev) goto out; } - dprintk(2, "bcm5974: switched to wellspring mode.\n"); + dprintk(2, "bcm5974: switched to %s mode.\n", + on ? "wellspring" : "normal"); out: kfree(data); @@ -481,7 +485,7 @@ exit: */ static int bcm5974_start_traffic(struct bcm5974 *dev) { - if (bcm5974_wellspring_mode(dev)) { + if (bcm5974_wellspring_mode(dev, true)) { dprintk(1, "bcm5974: mode switch failed\n"); goto error; } @@ -504,6 +508,7 @@ static void bcm5974_pause_traffic(struct bcm5974 *dev) { usb_kill_urb(dev->tp_urb); usb_kill_urb(dev->bt_urb); + bcm5974_wellspring_mode(dev, false); } /* diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c index bf44f9d68342..c8b7e8a45c4d 100644 --- a/drivers/input/touchscreen/jornada720_ts.c +++ b/drivers/input/touchscreen/jornada720_ts.c @@ -119,8 +119,8 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev) input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; - input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); - input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c index be0e12144b8b..34935155c1c0 100644 --- a/drivers/leds/leds-fsg.c +++ b/drivers/leds/leds-fsg.c @@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev) { int ret; + /* Map the LED chip select address space */ + latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); + if (!latch_address) { + ret = -ENOMEM; + goto failremap; + } + + latch_value = 0xffff; + *latch_address = latch_value; + ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); if (ret < 0) goto failwlan; @@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev) if (ret < 0) goto failring; - /* Map the LED chip select address space */ - latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); - if (!latch_address) { - ret = -ENOMEM; - goto failremap; - } - - latch_value = 0xffff; - *latch_address = latch_value; - return ret; - failremap: - led_classdev_unregister(&fsg_ring_led); failring: led_classdev_unregister(&fsg_sync_led); failsync: @@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev) failwan: led_classdev_unregister(&fsg_wlan_led); failwlan: + iounmap(latch_address); + failremap: return ret; } static int fsg_led_remove(struct platform_device *pdev) { - iounmap(latch_address); - led_classdev_unregister(&fsg_wlan_led); led_classdev_unregister(&fsg_wan_led); led_classdev_unregister(&fsg_sata_led); @@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev) led_classdev_unregister(&fsg_sync_led); led_classdev_unregister(&fsg_ring_led); + iounmap(latch_address); + return 0; } diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 146c06972863..f508729123b5 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c 
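The leds-fsg hunk above moves the ioremap() of the LED latch ahead of the led_classdev_register() calls, so no LED class device can be toggled before the hardware behind it is mapped, and the error path now unwinds in strict reverse order. A condensed sketch of that probe/unwind shape, reusing the driver's names but reduced to two LEDs; this is illustrative, not the full driver:

#include <linux/io.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

/* In the real driver these live in leds-fsg.c; declared here for the sketch. */
extern unsigned short *latch_address;
extern struct led_classdev fsg_wlan_led, fsg_wan_led;

static int example_led_probe(struct platform_device *pdev)
{
	int ret;

	/* Map the hardware first; a registered LED must never hit an unmapped latch. */
	latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
	if (!latch_address)
		return -ENOMEM;

	/* Only then expose the class devices. */
	ret = led_classdev_register(&pdev->dev, &fsg_wlan_led);
	if (ret < 0)
		goto fail_wlan;

	ret = led_classdev_register(&pdev->dev, &fsg_wan_led);
	if (ret < 0)
		goto fail_wan;

	return 0;

	/* Unwind in exact reverse order of setup. */
fail_wan:
	led_classdev_unregister(&fsg_wlan_led);
fail_wlan:
	iounmap(latch_address);
	return ret;
}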
@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct pca955x_led *pca955x; - int i; - int err = -ENODEV; struct pca955x_chipdef *chip; struct i2c_adapter *adapter; struct led_platform_data *pdata; + int i, err; chip = &pca955x_chipdefs[id->driver_data]; adapter = to_i2c_adapter(client->dev.parent); @@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client, } } + pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL); + if (!pca955x) + return -ENOMEM; + + i2c_set_clientdata(client, pca955x); + for (i = 0; i < chip->bits; i++) { - pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL); - if (!pca955x) { - err = -ENOMEM; - goto exit; - } + pca955x[i].chipdef = chip; + pca955x[i].client = client; + pca955x[i].led_num = i; - pca955x->chipdef = chip; - pca955x->client = client; - pca955x->led_num = i; /* Platform data can specify LED names and default triggers */ if (pdata) { if (pdata->leds[i].name) - snprintf(pca955x->name, 32, "pca955x:%s", - pdata->leds[i].name); + snprintf(pca955x[i].name, + sizeof(pca955x[i].name), "pca955x:%s", + pdata->leds[i].name); if (pdata->leds[i].default_trigger) - pca955x->led_cdev.default_trigger = + pca955x[i].led_cdev.default_trigger = pdata->leds[i].default_trigger; } else { - snprintf(pca955x->name, 32, "pca955x:%d", i); + snprintf(pca955x[i].name, sizeof(pca955x[i].name), + "pca955x:%d", i); } - spin_lock_init(&pca955x->lock); - pca955x->led_cdev.name = pca955x->name; - pca955x->led_cdev.brightness_set = - pca955x_led_set; + spin_lock_init(&pca955x[i].lock); - /* - * Client data is a pointer to the _first_ pca955x_led - * struct - */ - if (i == 0) - i2c_set_clientdata(client, pca955x); + pca955x[i].led_cdev.name = pca955x[i].name; + pca955x[i].led_cdev.brightness_set = pca955x_led_set; - INIT_WORK(&(pca955x->work), pca955x_led_work); + INIT_WORK(&pca955x[i].work, pca955x_led_work); - led_classdev_register(&client->dev, &(pca955x->led_cdev)); + err = led_classdev_register(&client->dev, &pca955x[i].led_cdev); + if (err < 0) + goto exit; } /* Turn off LEDs */ @@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client, pca955x_write_psc(client, 1, 0); return 0; + exit: + while (i--) { + led_classdev_unregister(&pca955x[i].led_cdev); + cancel_work_sync(&pca955x[i].work); + } + + kfree(pca955x); + i2c_set_clientdata(client, NULL); + return err; } static int __devexit pca955x_remove(struct i2c_client *client) { struct pca955x_led *pca955x = i2c_get_clientdata(client); - int leds = pca955x->chipdef->bits; int i; - for (i = 0; i < leds; i++) { - led_classdev_unregister(&(pca955x->led_cdev)); - cancel_work_sync(&(pca955x->work)); - kfree(pca955x); - pca955x = pca955x + 1; + for (i = 0; i < pca955x->chipdef->bits; i++) { + led_classdev_unregister(&pca955x[i].led_cdev); + cancel_work_sync(&pca955x[i].work); } + kfree(pca955x); + i2c_set_clientdata(client, NULL); + return 0; } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 71dd65aa31b6..c2fcf28b4c70 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -63,6 +63,7 @@ struct multipath { const char *hw_handler_name; struct work_struct activate_path; + struct pgpath *pgpath_to_activate; unsigned nr_priority_groups; struct list_head priority_groups; unsigned pg_init_required; /* pg_init needs calling? 
*/ @@ -146,6 +147,7 @@ static struct priority_group *alloc_priority_group(void) static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) { + unsigned long flags; struct pgpath *pgpath, *tmp; struct multipath *m = ti->private; @@ -154,6 +156,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) if (m->hw_handler_name) scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); dm_put_device(ti, pgpath->path.dev); + spin_lock_irqsave(&m->lock, flags); + if (m->pgpath_to_activate == pgpath) + m->pgpath_to_activate = NULL; + spin_unlock_irqrestore(&m->lock, flags); free_pgpath(pgpath); } } @@ -421,6 +427,7 @@ static void process_queued_ios(struct work_struct *work) __choose_pgpath(m); pgpath = m->current_pgpath; + m->pgpath_to_activate = m->current_pgpath; if ((pgpath && !m->queue_io) || (!pgpath && !m->queue_if_no_path)) @@ -1093,8 +1100,15 @@ static void activate_path(struct work_struct *work) int ret; struct multipath *m = container_of(work, struct multipath, activate_path); - struct dm_path *path = &m->current_pgpath->path; + struct dm_path *path; + unsigned long flags; + spin_lock_irqsave(&m->lock, flags); + path = &m->pgpath_to_activate->path; + m->pgpath_to_activate = NULL; + spin_unlock_irqrestore(&m->lock, flags); + if (!path) + return; ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); pg_init_done(path, ret); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index bca448e11878..ace998ce59f6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -837,12 +837,14 @@ static int dm_merge_bvec(struct request_queue *q, struct dm_table *map = dm_get_table(md); struct dm_target *ti; sector_t max_sectors; - int max_size; + int max_size = 0; if (unlikely(!map)) - return 0; + goto out; ti = dm_table_find_target(map, bvm->bi_sector); + if (!dm_target_is_valid(ti)) + goto out_table; /* * Find maximum amount of I/O that won't need splitting @@ -861,14 +863,16 @@ static int dm_merge_bvec(struct request_queue *q, if (max_size && ti->type->merge) max_size = ti->type->merge(ti, bvm, biovec, max_size); +out_table: + dm_table_put(map); + +out: /* * Always allow an entire first page */ if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) max_size = biovec->bv_len; - dm_table_put(map); - return max_size; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 10c44d3fe01a..68dc8d9eb24e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -21,7 +21,7 @@ config MFD_SM501 config MFD_SM501_GPIO bool "Export GPIO via GPIO layer" - depends on MFD_SM501 && HAVE_GPIO_LIB + depends on MFD_SM501 && GPIOLIB ---help--- This option uses the gpio library layer to export the 64 GPIO lines on the SM501. 
The platform data is used to supply the @@ -29,7 +29,7 @@ config MFD_SM501_GPIO config MFD_ASIC3 bool "Support for Compaq ASIC3" - depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM + depends on GENERIC_HARDIRQS && GPIOLIB && ARM ---help--- This driver supports the ASIC3 multifunction chip found on many PDAs (mainly iPAQ and HTC based ones) diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index bc2a807f210d..ba5aa2008273 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -312,7 +312,6 @@ static int __init asic3_irq_probe(struct platform_device *pdev) struct asic3 *asic = platform_get_drvdata(pdev); unsigned long clksel = 0; unsigned int irq, irq_base; - int map_size; int ret; ret = platform_get_irq(pdev, 0); @@ -534,6 +533,7 @@ static int __init asic3_probe(struct platform_device *pdev) struct asic3 *asic; struct resource *mem; unsigned long clksel; + int map_size; int ret = 0; asic = kzalloc(sizeof(struct asic3), GFP_KERNEL); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index ea8d7a3490d9..1ce21d4c8608 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -114,6 +114,17 @@ config MMC_ATMELMCI If unsure, say N. +config MMC_ATMELMCI_DMA + bool "Atmel MCI DMA support (EXPERIMENTAL)" + depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL + help + Say Y here to have the Atmel MCI driver use a DMA engine to + do data transfers and thus increase the throughput and + reduce the CPU utilization. Note that this is highly + experimental and may cause the driver to lock up. + + If unsure, say N. + config MMC_IMX tristate "Motorola i.MX Multimedia Card Interface support" depends on ARCH_IMX diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index 26bd80e65031..b58364ed6bba 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h @@ -25,8 +25,10 @@ #define MCI_SDCR 0x000c /* SD Card / SDIO */ # define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ # define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ -# define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ -# define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ +# define MCI_SDCSEL_MASK ( 3 << 0) +# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ +# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ +# define MCI_SDCBUS_MASK ( 3 << 6) #define MCI_ARGR 0x0010 /* Command Argument */ #define MCI_CMDR 0x0014 /* Command */ # define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 917035e16da4..7a3f2436b011 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -11,6 +11,8 @@ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> @@ -33,64 +35,178 @@ #include "atmel-mci-regs.h" #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) +#define ATMCI_DMA_THRESHOLD 16 enum { EVENT_CMD_COMPLETE = 0, - EVENT_DATA_ERROR, - EVENT_DATA_COMPLETE, - EVENT_STOP_SENT, - EVENT_STOP_COMPLETE, EVENT_XFER_COMPLETE, + EVENT_DATA_COMPLETE, + EVENT_DATA_ERROR, +}; + +enum atmel_mci_state { + STATE_IDLE = 0, + STATE_SENDING_CMD, + STATE_SENDING_DATA, + STATE_DATA_BUSY, + STATE_SENDING_STOP, + STATE_DATA_ERROR, +}; + +struct atmel_mci_dma { +#ifdef CONFIG_MMC_ATMELMCI_DMA + struct dma_client client; + struct dma_chan *chan; + struct dma_async_tx_descriptor *data_desc; 
+#endif }; +/** + * struct atmel_mci - MMC controller state shared between all slots + * @lock: Spinlock protecting the queue and associated data. + * @regs: Pointer to MMIO registers. + * @sg: Scatterlist entry currently being processed by PIO code, if any. + * @pio_offset: Offset into the current scatterlist entry. + * @cur_slot: The slot which is currently using the controller. + * @mrq: The request currently being processed on @cur_slot, + * or NULL if the controller is idle. + * @cmd: The command currently being sent to the card, or NULL. + * @data: The data currently being transferred, or NULL if no data + * transfer is in progress. + * @dma: DMA client state. + * @data_chan: DMA channel being used for the current data transfer. + * @cmd_status: Snapshot of SR taken upon completion of the current + * command. Only valid when EVENT_CMD_COMPLETE is pending. + * @data_status: Snapshot of SR taken upon completion of the current + * data transfer. Only valid when EVENT_DATA_COMPLETE or + * EVENT_DATA_ERROR is pending. + * @stop_cmdr: Value to be loaded into CMDR when the stop command is + * to be sent. + * @tasklet: Tasklet running the request state machine. + * @pending_events: Bitmask of events flagged by the interrupt handler + * to be processed by the tasklet. + * @completed_events: Bitmask of events which the state machine has + * processed. + * @state: Tasklet state. + * @queue: List of slots waiting for access to the controller. + * @need_clock_update: Update the clock rate before the next request. + * @need_reset: Reset controller before next request. + * @mode_reg: Value of the MR register. + * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus + * rate and timeout calculations. + * @mapbase: Physical address of the MMIO registers. + * @mck: The peripheral bus clock hooked up to the MMC controller. + * @pdev: Platform device associated with the MMC controller. + * @slot: Slots sharing this MMC controller. + * + * Locking + * ======= + * + * @lock is a softirq-safe spinlock protecting @queue as well as + * @cur_slot, @mrq and @state. These must always be updated + * at the same time while holding @lock. + * + * @lock also protects mode_reg and need_clock_update since these are + * used to synchronize mode register updates with the queue + * processing. + * + * The @mrq field of struct atmel_mci_slot is also protected by @lock, + * and must always be written at the same time as the slot is added to + * @queue. + * + * @pending_events and @completed_events are accessed using atomic bit + * operations, so they don't need any locking. + * + * None of the fields touched by the interrupt handler need any + * locking. However, ordering is important: Before EVENT_DATA_ERROR or + * EVENT_DATA_COMPLETE is set in @pending_events, all data-related + * interrupts must be disabled and @data_status updated with a + * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the + * CMDRDY interupt must be disabled and @cmd_status updated with a + * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the + * bytes_xfered field of @data must be written. This is ensured by + * using barriers. 
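The ordering rule described above (publish the status snapshot, then set the pending bit; the tasklet clears the bit before reading the snapshot) can be illustrated with a small userspace analogue. C11 release/acquire atomics stand in here for the kernel's smp_wmb()/smp_rmb() and atomic bit operations, and all names and values are illustrative rather than the driver's own:

#include <stdatomic.h>
#include <stdio.h>

static unsigned int data_status;        /* snapshot of the status register */
static atomic_uint pending_events;      /* bit 0 plays the role of a pending event */

/* Producer side (the interrupt handler in the driver): write the status
 * first, then publish the event with release semantics. */
static void irq_handler(unsigned int sr)
{
        data_status = sr;
        atomic_fetch_or_explicit(&pending_events, 1u, memory_order_release);
}

/* Consumer side (the tasklet): clear the pending bit with acquire
 * semantics, which guarantees the status written before it is visible. */
static void tasklet(void)
{
        unsigned int old = atomic_fetch_and_explicit(&pending_events, ~1u,
                                                     memory_order_acquire);
        if (old & 1u)
                printf("event pending, status=0x%08x\n", data_status);
}

int main(void)
{
        irq_handler(0x00000001);        /* pretend this is the SR snapshot */
        tasklet();
        return 0;
}

In the driver the two sides run in interrupt and tasklet context respectively; the single-threaded main() here only demonstrates the required call order, not concurrency.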
+ */ struct atmel_mci { - struct mmc_host *mmc; + spinlock_t lock; void __iomem *regs; struct scatterlist *sg; unsigned int pio_offset; + struct atmel_mci_slot *cur_slot; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; + struct atmel_mci_dma dma; + struct dma_chan *data_chan; + u32 cmd_status; u32 data_status; - u32 stop_status; u32 stop_cmdr; - u32 mode_reg; - u32 sdc_reg; - struct tasklet_struct tasklet; unsigned long pending_events; unsigned long completed_events; + enum atmel_mci_state state; + struct list_head queue; - int present; - int detect_pin; - int wp_pin; - - /* For detect pin debouncing */ - struct timer_list detect_timer; - + bool need_clock_update; + bool need_reset; + u32 mode_reg; unsigned long bus_hz; unsigned long mapbase; struct clk *mck; struct platform_device *pdev; + + struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS]; +}; + +/** + * struct atmel_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @sdc_reg: Value of SDCR to be written before using this slot. + * @mrq: mmc_request currently being processed or waiting to be + * processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + * &struct atmel_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @flags: Random state bits associated with the slot. + * @detect_pin: GPIO pin used for card detection, or negative if not + * available. + * @wp_pin: GPIO pin used for card write protect sending, or negative + * if not available. + * @detect_timer: Timer used for debouncing @detect_pin interrupts. + */ +struct atmel_mci_slot { + struct mmc_host *mmc; + struct atmel_mci *host; + + u32 sdc_reg; + + struct mmc_request *mrq; + struct list_head queue_node; + + unsigned int clock; + unsigned long flags; +#define ATMCI_CARD_PRESENT 0 +#define ATMCI_CARD_NEED_INIT 1 +#define ATMCI_SHUTDOWN 2 + + int detect_pin; + int wp_pin; + + struct timer_list detect_timer; }; -#define atmci_is_completed(host, event) \ - test_bit(event, &host->completed_events) #define atmci_test_and_clear_pending(host, event) \ test_and_clear_bit(event, &host->pending_events) -#define atmci_test_and_set_completed(host, event) \ - test_and_set_bit(event, &host->completed_events) #define atmci_set_completed(host, event) \ set_bit(event, &host->completed_events) #define atmci_set_pending(host, event) \ set_bit(event, &host->pending_events) -#define atmci_clear_pending(host, event) \ - clear_bit(event, &host->pending_events) /* * The debugfs stuff below is mostly optimized away when @@ -98,14 +214,15 @@ struct atmel_mci { */ static int atmci_req_show(struct seq_file *s, void *v) { - struct atmel_mci *host = s->private; - struct mmc_request *mrq = host->mrq; + struct atmel_mci_slot *slot = s->private; + struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_command *stop; struct mmc_data *data; /* Make sure we get a consistent snapshot */ - spin_lock_irq(&host->mmc->lock); + spin_lock_bh(&slot->host->lock); + mrq = slot->mrq; if (mrq) { cmd = mrq->cmd; @@ -130,7 +247,7 @@ static int atmci_req_show(struct seq_file *s, void *v) stop->resp[2], stop->error); } - spin_unlock_irq(&host->mmc->lock); + spin_unlock_bh(&slot->host->lock); return 0; } @@ -193,12 +310,16 @@ static int atmci_regs_show(struct seq_file *s, void *v) if (!buf) return -ENOMEM; - /* Grab a more or less consistent snapshot */ - spin_lock_irq(&host->mmc->lock); + /* + * Grab a more or less consistent 
snapshot. Note that we're + * not disabling interrupts, so IMR and SR may not be + * consistent. + */ + spin_lock_bh(&host->lock); clk_enable(host->mck); memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); clk_disable(host->mck); - spin_unlock_irq(&host->mmc->lock); + spin_unlock_bh(&host->lock); seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", buf[MCI_MR / 4], @@ -236,13 +357,13 @@ static const struct file_operations atmci_regs_fops = { .release = single_release, }; -static void atmci_init_debugfs(struct atmel_mci *host) +static void atmci_init_debugfs(struct atmel_mci_slot *slot) { - struct mmc_host *mmc; - struct dentry *root; - struct dentry *node; + struct mmc_host *mmc = slot->mmc; + struct atmel_mci *host = slot->host; + struct dentry *root; + struct dentry *node; - mmc = host->mmc; root = mmc->debugfs_root; if (!root) return; @@ -254,7 +375,11 @@ static void atmci_init_debugfs(struct atmel_mci *host) if (!node) goto err; - node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops); + node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops); + if (!node) + goto err; + + node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); if (!node) goto err; @@ -271,25 +396,7 @@ static void atmci_init_debugfs(struct atmel_mci *host) return; err: - dev_err(&host->pdev->dev, - "failed to initialize debugfs for controller\n"); -} - -static void atmci_enable(struct atmel_mci *host) -{ - clk_enable(host->mck); - mci_writel(host, CR, MCI_CR_MCIEN); - mci_writel(host, MR, host->mode_reg); - mci_writel(host, SDCR, host->sdc_reg); -} - -static void atmci_disable(struct atmel_mci *host) -{ - mci_writel(host, CR, MCI_CR_SWRST); - - /* Stall until write is complete, then disable the bus clock */ - mci_readl(host, SR); - clk_disable(host->mck); + dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); } static inline unsigned int ns_to_clocks(struct atmel_mci *host, @@ -299,7 +406,7 @@ static inline unsigned int ns_to_clocks(struct atmel_mci *host, } static void atmci_set_timeout(struct atmel_mci *host, - struct mmc_data *data) + struct atmel_mci_slot *slot, struct mmc_data *data) { static unsigned dtomul_to_shift[] = { 0, 4, 7, 8, 10, 12, 16, 20 @@ -322,7 +429,7 @@ static void atmci_set_timeout(struct atmel_mci *host, dtocyc = 15; } - dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", + dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", dtocyc << dtomul_to_shift[dtomul]); mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); } @@ -375,15 +482,12 @@ static u32 atmci_prepare_command(struct mmc_host *mmc, } static void atmci_start_command(struct atmel_mci *host, - struct mmc_command *cmd, - u32 cmd_flags) + struct mmc_command *cmd, u32 cmd_flags) { - /* Must read host->cmd after testing event flags */ - smp_rmb(); WARN_ON(host->cmd); host->cmd = cmd; - dev_vdbg(&host->mmc->class_dev, + dev_vdbg(&host->pdev->dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags); @@ -391,34 +495,157 @@ static void atmci_start_command(struct atmel_mci *host, mci_writel(host, CMDR, cmd_flags); } -static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) +static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) { - struct atmel_mci *host = mmc_priv(mmc); - atmci_start_command(host, data->stop, host->stop_cmdr); mci_writel(host, IER, MCI_CMDRDY); } -static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) +#ifdef CONFIG_MMC_ATMELMCI_DMA +static void atmci_dma_cleanup(struct 
atmel_mci *host) { - struct atmel_mci *host = mmc_priv(mmc); + struct mmc_data *data = host->data; - WARN_ON(host->cmd || host->data); - host->mrq = NULL; + dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, + ((data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); +} + +static void atmci_stop_dma(struct atmel_mci *host) +{ + struct dma_chan *chan = host->data_chan; + + if (chan) { + chan->device->device_terminate_all(chan); + atmci_dma_cleanup(host); + } else { + /* Data transfer was stopped by the interrupt handler */ + atmci_set_pending(host, EVENT_XFER_COMPLETE); + mci_writel(host, IER, MCI_NOTBUSY); + } +} + +/* This function is called by the DMA driver from tasklet context. */ +static void atmci_dma_complete(void *arg) +{ + struct atmel_mci *host = arg; + struct mmc_data *data = host->data; + + dev_vdbg(&host->pdev->dev, "DMA complete\n"); + + atmci_dma_cleanup(host); + + /* + * If the card was removed, data will be NULL. No point trying + * to send the stop command or waiting for NBUSY in this case. + */ + if (data) { + atmci_set_pending(host, EVENT_XFER_COMPLETE); + tasklet_schedule(&host->tasklet); + + /* + * Regardless of what the documentation says, we have + * to wait for NOTBUSY even after block read + * operations. + * + * When the DMA transfer is complete, the controller + * may still be reading the CRC from the card, i.e. + * the data transfer is still in progress and we + * haven't seen all the potential error bits yet. + * + * The interrupt handler will schedule a different + * tasklet to finish things up when the data transfer + * is completely done. + * + * We may not complete the mmc request here anyway + * because the mmc layer may call back and cause us to + * violate the "don't submit new operations from the + * completion callback" rule of the dma engine + * framework. + */ + mci_writel(host, IER, MCI_NOTBUSY); + } +} + +static int +atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +{ + struct dma_chan *chan; + struct dma_async_tx_descriptor *desc; + struct scatterlist *sg; + unsigned int i; + enum dma_data_direction direction; + + /* + * We don't do DMA on "complex" transfers, i.e. with + * non-word-aligned buffers or lengths. Also, we don't bother + * with all the DMA setup overhead for short transfers. + */ + if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) + return -EINVAL; + if (data->blksz & 3) + return -EINVAL; + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 3 || sg->length & 3) + return -EINVAL; + } + + /* If we don't have a channel, we can't do DMA */ + chan = host->dma.chan; + if (chan) { + dma_chan_get(chan); + host->data_chan = chan; + } + + if (!chan) + return -ENODEV; + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + desc = chan->device->device_prep_slave_sg(chan, + data->sg, data->sg_len, direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -ENOMEM; - atmci_disable(host); + host->dma.data_desc = desc; + desc->callback = atmci_dma_complete; + desc->callback_param = host; + desc->tx_submit(desc); - mmc_request_done(mmc, mrq); + /* Go! 
*/ + chan->device->device_issue_pending(chan); + + return 0; +} + +#else /* CONFIG_MMC_ATMELMCI_DMA */ + +static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +{ + return -ENOSYS; } +static void atmci_stop_dma(struct atmel_mci *host) +{ + /* Data transfer was stopped by the interrupt handler */ + atmci_set_pending(host, EVENT_XFER_COMPLETE); + mci_writel(host, IER, MCI_NOTBUSY); +} + +#endif /* CONFIG_MMC_ATMELMCI_DMA */ + /* * Returns a mask of interrupt flags to be enabled after the whole * request has been prepared. */ -static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) +static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) { - struct atmel_mci *host = mmc_priv(mmc); - u32 iflags; + u32 iflags; data->error = -EINPROGRESS; @@ -426,75 +653,89 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) host->sg = NULL; host->data = data; - mci_writel(host, BLKR, MCI_BCNT(data->blocks) - | MCI_BLKLEN(data->blksz)); - dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", - MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); - iflags = ATMCI_DATA_ERROR_FLAGS; - host->sg = data->sg; - host->pio_offset = 0; - if (data->flags & MMC_DATA_READ) - iflags |= MCI_RXRDY; - else - iflags |= MCI_TXRDY; + if (atmci_submit_data_dma(host, data)) { + host->data_chan = NULL; + + /* + * Errata: MMC data write operation with less than 12 + * bytes is impossible. + * + * Errata: MCI Transmit Data Register (TDR) FIFO + * corruption when length is not multiple of 4. + */ + if (data->blocks * data->blksz < 12 + || (data->blocks * data->blksz) & 3) + host->need_reset = true; + + host->sg = data->sg; + host->pio_offset = 0; + if (data->flags & MMC_DATA_READ) + iflags |= MCI_RXRDY; + else + iflags |= MCI_TXRDY; + } return iflags; } -static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +static void atmci_start_request(struct atmel_mci *host, + struct atmel_mci_slot *slot) { - struct atmel_mci *host = mmc_priv(mmc); - struct mmc_data *data; + struct mmc_request *mrq; struct mmc_command *cmd; + struct mmc_data *data; u32 iflags; - u32 cmdflags = 0; - - iflags = mci_readl(host, IMR); - if (iflags) - dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n", - mci_readl(host, IMR)); - - WARN_ON(host->mrq != NULL); - - /* - * We may "know" the card is gone even though there's still an - * electrical connection. If so, we really need to communicate - * this to the MMC core since there won't be any more - * interrupts as the card is completely removed. Otherwise, - * the MMC core might believe the card is still there even - * though the card was just removed very slowly. - */ - if (!host->present) { - mrq->cmd->error = -ENOMEDIUM; - mmc_request_done(mmc, mrq); - return; - } + u32 cmdflags; + mrq = slot->mrq; + host->cur_slot = slot; host->mrq = mrq; + host->pending_events = 0; host->completed_events = 0; + host->data_status = 0; - atmci_enable(host); + if (host->need_reset) { + mci_writel(host, CR, MCI_CR_SWRST); + mci_writel(host, CR, MCI_CR_MCIEN); + mci_writel(host, MR, host->mode_reg); + host->need_reset = false; + } + mci_writel(host, SDCR, slot->sdc_reg); - /* We don't support multiple blocks of weird lengths. 
*/ + iflags = mci_readl(host, IMR); + if (iflags) + dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", + iflags); + + if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { + /* Send init sequence (74 clock cycles) */ + mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); + while (!(mci_readl(host, SR) & MCI_CMDRDY)) + cpu_relax(); + } data = mrq->data; if (data) { - if (data->blocks > 1 && data->blksz & 3) - goto fail; - atmci_set_timeout(host, data); + atmci_set_timeout(host, slot, data); + + /* Must set block count/size before sending command */ + mci_writel(host, BLKR, MCI_BCNT(data->blocks) + | MCI_BLKLEN(data->blksz)); + dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", + MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); } iflags = MCI_CMDRDY; cmd = mrq->cmd; - cmdflags = atmci_prepare_command(mmc, cmd); + cmdflags = atmci_prepare_command(slot->mmc, cmd); atmci_start_command(host, cmd, cmdflags); if (data) - iflags |= atmci_submit_data(mmc, data); + iflags |= atmci_submit_data(host, data); if (mrq->stop) { - host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); + host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); host->stop_cmdr |= MCI_CMDR_STOP_XFER; if (!(data->flags & MMC_DATA_WRITE)) host->stop_cmdr |= MCI_CMDR_TRDIR_READ; @@ -511,59 +752,156 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) * prepared yet.) */ mci_writel(host, IER, iflags); +} - return; +static void atmci_queue_request(struct atmel_mci *host, + struct atmel_mci_slot *slot, struct mmc_request *mrq) +{ + dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", + host->state); + + spin_lock_bh(&host->lock); + slot->mrq = mrq; + if (host->state == STATE_IDLE) { + host->state = STATE_SENDING_CMD; + atmci_start_request(host, slot); + } else { + list_add_tail(&slot->queue_node, &host->queue); + } + spin_unlock_bh(&host->lock); +} -fail: - atmci_disable(host); - host->mrq = NULL; - mrq->cmd->error = -EINVAL; - mmc_request_done(mmc, mrq); +static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct atmel_mci_slot *slot = mmc_priv(mmc); + struct atmel_mci *host = slot->host; + struct mmc_data *data; + + WARN_ON(slot->mrq); + + /* + * We may "know" the card is gone even though there's still an + * electrical connection. If so, we really need to communicate + * this to the MMC core since there won't be any more + * interrupts as the card is completely removed. Otherwise, + * the MMC core might believe the card is still there even + * though the card was just removed very slowly. + */ + if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) { + mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + return; + } + + /* We don't support multiple blocks of weird lengths. 
*/ + data = mrq->data; + if (data && data->blocks > 1 && data->blksz & 3) { + mrq->cmd->error = -EINVAL; + mmc_request_done(mmc, mrq); + } + + atmci_queue_request(host, slot, mrq); } static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { - struct atmel_mci *host = mmc_priv(mmc); + struct atmel_mci_slot *slot = mmc_priv(mmc); + struct atmel_mci *host = slot->host; + unsigned int i; + + slot->sdc_reg &= ~MCI_SDCBUS_MASK; + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + slot->sdc_reg |= MCI_SDCBUS_1BIT; + break; + case MMC_BUS_WIDTH_4: + slot->sdc_reg = MCI_SDCBUS_4BIT; + break; + } if (ios->clock) { + unsigned int clock_min = ~0U; u32 clkdiv; - /* Set clock rate */ - clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; + spin_lock_bh(&host->lock); + if (!host->mode_reg) { + clk_enable(host->mck); + mci_writel(host, CR, MCI_CR_SWRST); + mci_writel(host, CR, MCI_CR_MCIEN); + } + + /* + * Use mirror of ios->clock to prevent race with mmc + * core ios update when finding the minimum. + */ + slot->clock = ios->clock; + for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { + if (host->slot[i] && host->slot[i]->clock + && host->slot[i]->clock < clock_min) + clock_min = host->slot[i]->clock; + } + + /* Calculate clock divider */ + clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; if (clkdiv > 255) { dev_warn(&mmc->class_dev, "clock %u too slow; using %lu\n", - ios->clock, host->bus_hz / (2 * 256)); + clock_min, host->bus_hz / (2 * 256)); clkdiv = 255; } + /* + * WRPROOF and RDPROOF prevent overruns/underruns by + * stopping the clock when the FIFO is full/empty. + * This state is not expected to last for long. + */ host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF | MCI_MR_RDPROOF; - } - switch (ios->bus_width) { - case MMC_BUS_WIDTH_1: - host->sdc_reg = 0; - break; - case MMC_BUS_WIDTH_4: - host->sdc_reg = MCI_SDCBUS_4BIT; - break; + if (list_empty(&host->queue)) + mci_writel(host, MR, host->mode_reg); + else + host->need_clock_update = true; + + spin_unlock_bh(&host->lock); + } else { + bool any_slot_active = false; + + spin_lock_bh(&host->lock); + slot->clock = 0; + for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { + if (host->slot[i] && host->slot[i]->clock) { + any_slot_active = true; + break; + } + } + if (!any_slot_active) { + mci_writel(host, CR, MCI_CR_MCIDIS); + if (host->mode_reg) { + mci_readl(host, MR); + clk_disable(host->mck); + } + host->mode_reg = 0; + } + spin_unlock_bh(&host->lock); } switch (ios->power_mode) { - case MMC_POWER_ON: - /* Send init sequence (74 clock cycles) */ - atmci_enable(host); - mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); - while (!(mci_readl(host, SR) & MCI_CMDRDY)) - cpu_relax(); - atmci_disable(host); + case MMC_POWER_UP: + set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); break; default: /* * TODO: None of the currently available AVR32-based * boards allow MMC power to be turned off. Implement * power control when this can be tested properly. + * + * We also need to hook this into the clock management + * somehow so that newly inserted cards aren't + * subjected to a fast clock before we have a chance + * to figure out what the maximum rate is. Currently, + * there's no way to avoid this, and there never will + * be for boards that don't support power control. 
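The divider calculation in the set_ios() hunk above implies an MCI clock of bus_hz / (2 * (CLKDIV + 1)): DIV_ROUND_UP() keeps the result at or below the slowest rate requested by any slot, and CLKDIV is clamped to 255, giving a floor of bus_hz / 512. A quick standalone check of that arithmetic, using an example 66 MHz bus clock and a 25 MHz request (both numbers are illustrative, not taken from any board):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int mci_clkdiv(unsigned long bus_hz, unsigned int clock_min)
{
        unsigned int clkdiv = DIV_ROUND_UP(bus_hz, 2 * clock_min) - 1;

        /* CLKDIV is an 8-bit field, so the slowest rate is bus_hz / 512. */
        if (clkdiv > 255)
                clkdiv = 255;
        return clkdiv;
}

int main(void)
{
        unsigned long bus_hz = 66000000;        /* example MCK rate */
        unsigned int requested = 25000000;      /* slowest rate any slot asked for */
        unsigned int clkdiv = mci_clkdiv(bus_hz, requested);

        printf("clkdiv=%u -> actual %lu Hz (requested %u Hz)\n",
               clkdiv, bus_hz / (2 * (clkdiv + 1)), requested);
        return 0;
}

With these numbers the sketch prints clkdiv=1 and an actual rate of 16.5 MHz, which stays under the 25 MHz request as the round-up is meant to guarantee.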
*/ break; } @@ -571,31 +909,82 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) static int atmci_get_ro(struct mmc_host *mmc) { - int read_only = 0; - struct atmel_mci *host = mmc_priv(mmc); + int read_only = -ENOSYS; + struct atmel_mci_slot *slot = mmc_priv(mmc); - if (gpio_is_valid(host->wp_pin)) { - read_only = gpio_get_value(host->wp_pin); + if (gpio_is_valid(slot->wp_pin)) { + read_only = gpio_get_value(slot->wp_pin); dev_dbg(&mmc->class_dev, "card is %s\n", read_only ? "read-only" : "read-write"); - } else { - dev_dbg(&mmc->class_dev, - "no pin for checking read-only switch." - " Assuming write-enable.\n"); } return read_only; } -static struct mmc_host_ops atmci_ops = { +static int atmci_get_cd(struct mmc_host *mmc) +{ + int present = -ENOSYS; + struct atmel_mci_slot *slot = mmc_priv(mmc); + + if (gpio_is_valid(slot->detect_pin)) { + present = !gpio_get_value(slot->detect_pin); + dev_dbg(&mmc->class_dev, "card is %spresent\n", + present ? "" : "not "); + } + + return present; +} + +static const struct mmc_host_ops atmci_ops = { .request = atmci_request, .set_ios = atmci_set_ios, .get_ro = atmci_get_ro, + .get_cd = atmci_get_cd, }; +/* Called with host->lock held */ +static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) + __releases(&host->lock) + __acquires(&host->lock) +{ + struct atmel_mci_slot *slot = NULL; + struct mmc_host *prev_mmc = host->cur_slot->mmc; + + WARN_ON(host->cmd || host->data); + + /* + * Update the MMC clock rate if necessary. This may be + * necessary if set_ios() is called when a different slot is + * busy transfering data. + */ + if (host->need_clock_update) + mci_writel(host, MR, host->mode_reg); + + host->cur_slot->mrq = NULL; + host->mrq = NULL; + if (!list_empty(&host->queue)) { + slot = list_entry(host->queue.next, + struct atmel_mci_slot, queue_node); + list_del(&slot->queue_node); + dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", + mmc_hostname(slot->mmc)); + host->state = STATE_SENDING_CMD; + atmci_start_request(host, slot); + } else { + dev_vdbg(&host->pdev->dev, "list empty\n"); + host->state = STATE_IDLE; + } + + spin_unlock(&host->lock); + mmc_request_done(prev_mmc, mrq); + spin_lock(&host->lock); +} + static void atmci_command_complete(struct atmel_mci *host, - struct mmc_command *cmd, u32 status) + struct mmc_command *cmd) { + u32 status = host->cmd_status; + /* Read the response from the card (up to 16 bytes) */ cmd->resp[0] = mci_readl(host, RSPR); cmd->resp[1] = mci_readl(host, RSPR); @@ -612,11 +1001,12 @@ static void atmci_command_complete(struct atmel_mci *host, cmd->error = 0; if (cmd->error) { - dev_dbg(&host->mmc->class_dev, + dev_dbg(&host->pdev->dev, "command error: status=0x%08x\n", status); if (cmd->data) { host->data = NULL; + atmci_stop_dma(host); mci_writel(host, IDR, MCI_NOTBUSY | MCI_TXRDY | MCI_RXRDY | ATMCI_DATA_ERROR_FLAGS); @@ -626,146 +1016,222 @@ static void atmci_command_complete(struct atmel_mci *host, static void atmci_detect_change(unsigned long data) { - struct atmel_mci *host = (struct atmel_mci *)data; - struct mmc_request *mrq = host->mrq; - int present; + struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; + bool present; + bool present_old; /* - * atmci_remove() sets detect_pin to -1 before freeing the - * interrupt. We must not re-enable the interrupt if it has - * been freed. + * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before + * freeing the interrupt. 
We must not re-enable the interrupt + * if it has been freed, and if we're shutting down, it + * doesn't really matter whether the card is present or not. */ smp_rmb(); - if (!gpio_is_valid(host->detect_pin)) + if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) return; - enable_irq(gpio_to_irq(host->detect_pin)); - present = !gpio_get_value(host->detect_pin); + enable_irq(gpio_to_irq(slot->detect_pin)); + present = !gpio_get_value(slot->detect_pin); + present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); + + dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", + present, present_old); - dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", - present, host->present); + if (present != present_old) { + struct atmel_mci *host = slot->host; + struct mmc_request *mrq; - if (present != host->present) { - dev_dbg(&host->mmc->class_dev, "card %s\n", + dev_dbg(&slot->mmc->class_dev, "card %s\n", present ? "inserted" : "removed"); - host->present = present; - /* Reset controller if card is gone */ - if (!present) { - mci_writel(host, CR, MCI_CR_SWRST); - mci_writel(host, IDR, ~0UL); - mci_writel(host, CR, MCI_CR_MCIEN); - } + spin_lock(&host->lock); + + if (!present) + clear_bit(ATMCI_CARD_PRESENT, &slot->flags); + else + set_bit(ATMCI_CARD_PRESENT, &slot->flags); /* Clean up queue if present */ + mrq = slot->mrq; if (mrq) { - /* - * Reset controller to terminate any ongoing - * commands or data transfers. - */ - mci_writel(host, CR, MCI_CR_SWRST); + if (mrq == host->mrq) { + /* + * Reset controller to terminate any ongoing + * commands or data transfers. + */ + mci_writel(host, CR, MCI_CR_SWRST); + mci_writel(host, CR, MCI_CR_MCIEN); + mci_writel(host, MR, host->mode_reg); - if (!atmci_is_completed(host, EVENT_CMD_COMPLETE)) - mrq->cmd->error = -ENOMEDIUM; - - if (mrq->data && !atmci_is_completed(host, - EVENT_DATA_COMPLETE)) { host->data = NULL; - mrq->data->error = -ENOMEDIUM; + host->cmd = NULL; + + switch (host->state) { + case STATE_IDLE: + break; + case STATE_SENDING_CMD: + mrq->cmd->error = -ENOMEDIUM; + if (!mrq->data) + break; + /* fall through */ + case STATE_SENDING_DATA: + mrq->data->error = -ENOMEDIUM; + atmci_stop_dma(host); + break; + case STATE_DATA_BUSY: + case STATE_DATA_ERROR: + if (mrq->data->error == -EINPROGRESS) + mrq->data->error = -ENOMEDIUM; + if (!mrq->stop) + break; + /* fall through */ + case STATE_SENDING_STOP: + mrq->stop->error = -ENOMEDIUM; + break; + } + + atmci_request_end(host, mrq); + } else { + list_del(&slot->queue_node); + mrq->cmd->error = -ENOMEDIUM; + if (mrq->data) + mrq->data->error = -ENOMEDIUM; + if (mrq->stop) + mrq->stop->error = -ENOMEDIUM; + + spin_unlock(&host->lock); + mmc_request_done(slot->mmc, mrq); + spin_lock(&host->lock); } - if (mrq->stop && !atmci_is_completed(host, - EVENT_STOP_COMPLETE)) - mrq->stop->error = -ENOMEDIUM; - - host->cmd = NULL; - atmci_request_end(host->mmc, mrq); } + spin_unlock(&host->lock); - mmc_detect_change(host->mmc, 0); + mmc_detect_change(slot->mmc, 0); } } static void atmci_tasklet_func(unsigned long priv) { - struct mmc_host *mmc = (struct mmc_host *)priv; - struct atmel_mci *host = mmc_priv(mmc); + struct atmel_mci *host = (struct atmel_mci *)priv; struct mmc_request *mrq = host->mrq; struct mmc_data *data = host->data; + struct mmc_command *cmd = host->cmd; + enum atmel_mci_state state = host->state; + enum atmel_mci_state prev_state; + u32 status; + + spin_lock(&host->lock); - dev_vdbg(&mmc->class_dev, - "tasklet: pending/completed/mask %lx/%lx/%x\n", - host->pending_events, 
host->completed_events, + state = host->state; + + dev_vdbg(&host->pdev->dev, + "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", + state, host->pending_events, host->completed_events, mci_readl(host, IMR)); - if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { - /* - * host->cmd must be set to NULL before the interrupt - * handler sees EVENT_CMD_COMPLETE - */ - host->cmd = NULL; - smp_wmb(); - atmci_set_completed(host, EVENT_CMD_COMPLETE); - atmci_command_complete(host, mrq->cmd, host->cmd_status); - - if (!mrq->cmd->error && mrq->stop - && atmci_is_completed(host, EVENT_XFER_COMPLETE) - && !atmci_test_and_set_completed(host, - EVENT_STOP_SENT)) - send_stop_cmd(host->mmc, mrq->data); - } - if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) { - /* - * host->cmd must be set to NULL before the interrupt - * handler sees EVENT_STOP_COMPLETE - */ - host->cmd = NULL; - smp_wmb(); - atmci_set_completed(host, EVENT_STOP_COMPLETE); - atmci_command_complete(host, mrq->stop, host->stop_status); - } - if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) { - u32 status = host->data_status; + do { + prev_state = state; - dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); + switch (state) { + case STATE_IDLE: + break; - atmci_set_completed(host, EVENT_DATA_ERROR); - atmci_set_completed(host, EVENT_DATA_COMPLETE); + case STATE_SENDING_CMD: + if (!atmci_test_and_clear_pending(host, + EVENT_CMD_COMPLETE)) + break; - if (status & MCI_DTOE) { - dev_dbg(&mmc->class_dev, - "data timeout error\n"); - data->error = -ETIMEDOUT; - } else if (status & MCI_DCRCE) { - dev_dbg(&mmc->class_dev, "data CRC error\n"); - data->error = -EILSEQ; - } else { - dev_dbg(&mmc->class_dev, - "data FIFO error (status=%08x)\n", - status); - data->error = -EIO; - } + host->cmd = NULL; + atmci_set_completed(host, EVENT_CMD_COMPLETE); + atmci_command_complete(host, mrq->cmd); + if (!mrq->data || cmd->error) { + atmci_request_end(host, host->mrq); + goto unlock; + } + + prev_state = state = STATE_SENDING_DATA; + /* fall through */ + + case STATE_SENDING_DATA: + if (atmci_test_and_clear_pending(host, + EVENT_DATA_ERROR)) { + atmci_stop_dma(host); + if (data->stop) + send_stop_cmd(host, data); + state = STATE_DATA_ERROR; + break; + } - if (host->present && data->stop - && atmci_is_completed(host, EVENT_CMD_COMPLETE) - && !atmci_test_and_set_completed( - host, EVENT_STOP_SENT)) - send_stop_cmd(host->mmc, data); + if (!atmci_test_and_clear_pending(host, + EVENT_XFER_COMPLETE)) + break; - host->data = NULL; - } - if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { - atmci_set_completed(host, EVENT_DATA_COMPLETE); + atmci_set_completed(host, EVENT_XFER_COMPLETE); + prev_state = state = STATE_DATA_BUSY; + /* fall through */ + + case STATE_DATA_BUSY: + if (!atmci_test_and_clear_pending(host, + EVENT_DATA_COMPLETE)) + break; - if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { - data->bytes_xfered = data->blocks * data->blksz; - data->error = 0; + host->data = NULL; + atmci_set_completed(host, EVENT_DATA_COMPLETE); + status = host->data_status; + if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { + if (status & MCI_DTOE) { + dev_dbg(&host->pdev->dev, + "data timeout error\n"); + data->error = -ETIMEDOUT; + } else if (status & MCI_DCRCE) { + dev_dbg(&host->pdev->dev, + "data CRC error\n"); + data->error = -EILSEQ; + } else { + dev_dbg(&host->pdev->dev, + "data FIFO error (status=%08x)\n", + status); + data->error = -EIO; + } + } else { + data->bytes_xfered = data->blocks * data->blksz; + 
data->error = 0; + } + + if (!data->stop) { + atmci_request_end(host, host->mrq); + goto unlock; + } + + prev_state = state = STATE_SENDING_STOP; + if (!data->error) + send_stop_cmd(host, data); + /* fall through */ + + case STATE_SENDING_STOP: + if (!atmci_test_and_clear_pending(host, + EVENT_CMD_COMPLETE)) + break; + + host->cmd = NULL; + atmci_command_complete(host, mrq->stop); + atmci_request_end(host, host->mrq); + goto unlock; + + case STATE_DATA_ERROR: + if (!atmci_test_and_clear_pending(host, + EVENT_XFER_COMPLETE)) + break; + + state = STATE_DATA_BUSY; + break; } + } while (state != prev_state); - host->data = NULL; - } + host->state = state; - if (host->mrq && !host->cmd && !host->data) - atmci_request_end(mmc, host->mrq); +unlock: + spin_unlock(&host->lock); } static void atmci_read_data_pio(struct atmel_mci *host) @@ -787,6 +1253,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) nbytes += 4; if (offset == sg->length) { + flush_dcache_page(sg_page(sg)); host->sg = sg = sg_next(sg); if (!sg) goto done; @@ -815,9 +1282,11 @@ static void atmci_read_data_pio(struct atmel_mci *host) mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY | ATMCI_DATA_ERROR_FLAGS)); host->data_status = status; + data->bytes_xfered += nbytes; + smp_wmb(); atmci_set_pending(host, EVENT_DATA_ERROR); tasklet_schedule(&host->tasklet); - break; + return; } } while (status & MCI_RXRDY); @@ -830,10 +1299,8 @@ done: mci_writel(host, IDR, MCI_RXRDY); mci_writel(host, IER, MCI_NOTBUSY); data->bytes_xfered += nbytes; - atmci_set_completed(host, EVENT_XFER_COMPLETE); - if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) - && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) - send_stop_cmd(host->mmc, data); + smp_wmb(); + atmci_set_pending(host, EVENT_XFER_COMPLETE); } static void atmci_write_data_pio(struct atmel_mci *host) @@ -886,9 +1353,11 @@ static void atmci_write_data_pio(struct atmel_mci *host) mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY | ATMCI_DATA_ERROR_FLAGS)); host->data_status = status; + data->bytes_xfered += nbytes; + smp_wmb(); atmci_set_pending(host, EVENT_DATA_ERROR); tasklet_schedule(&host->tasklet); - break; + return; } } while (status & MCI_TXRDY); @@ -901,38 +1370,26 @@ done: mci_writel(host, IDR, MCI_TXRDY); mci_writel(host, IER, MCI_NOTBUSY); data->bytes_xfered += nbytes; - atmci_set_completed(host, EVENT_XFER_COMPLETE); - if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) - && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) - send_stop_cmd(host->mmc, data); + smp_wmb(); + atmci_set_pending(host, EVENT_XFER_COMPLETE); } -static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) +static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) { - struct atmel_mci *host = mmc_priv(mmc); - mci_writel(host, IDR, MCI_CMDRDY); - if (atmci_is_completed(host, EVENT_STOP_SENT)) { - host->stop_status = status; - atmci_set_pending(host, EVENT_STOP_COMPLETE); - } else { - host->cmd_status = status; - atmci_set_pending(host, EVENT_CMD_COMPLETE); - } - + host->cmd_status = status; + smp_wmb(); + atmci_set_pending(host, EVENT_CMD_COMPLETE); tasklet_schedule(&host->tasklet); } static irqreturn_t atmci_interrupt(int irq, void *dev_id) { - struct mmc_host *mmc = dev_id; - struct atmel_mci *host = mmc_priv(mmc); + struct atmel_mci *host = dev_id; u32 status, mask, pending; unsigned int pass_count = 0; - spin_lock(&mmc->lock); - do { status = mci_readl(host, SR); mask = mci_readl(host, IMR); @@ -944,13 +1401,18 @@ static irqreturn_t atmci_interrupt(int irq, 
void *dev_id) mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS | MCI_RXRDY | MCI_TXRDY); pending &= mci_readl(host, IMR); + host->data_status = status; + smp_wmb(); atmci_set_pending(host, EVENT_DATA_ERROR); tasklet_schedule(&host->tasklet); } if (pending & MCI_NOTBUSY) { - mci_writel(host, IDR, (MCI_NOTBUSY - | ATMCI_DATA_ERROR_FLAGS)); + mci_writel(host, IDR, + ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); + if (!host->data_status) + host->data_status = status; + smp_wmb(); atmci_set_pending(host, EVENT_DATA_COMPLETE); tasklet_schedule(&host->tasklet); } @@ -960,18 +1422,15 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) atmci_write_data_pio(host); if (pending & MCI_CMDRDY) - atmci_cmd_interrupt(mmc, status); + atmci_cmd_interrupt(host, status); } while (pass_count++ < 5); - spin_unlock(&mmc->lock); - return pass_count ? IRQ_HANDLED : IRQ_NONE; } static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) { - struct mmc_host *mmc = dev_id; - struct atmel_mci *host = mmc_priv(mmc); + struct atmel_mci_slot *slot = dev_id; /* * Disable interrupts until the pin has stabilized and check @@ -979,19 +1438,176 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) * middle of the timer routine when this interrupt triggers. */ disable_irq_nosync(irq); - mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); + mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20)); return IRQ_HANDLED; } +#ifdef CONFIG_MMC_ATMELMCI_DMA + +static inline struct atmel_mci * +dma_client_to_atmel_mci(struct dma_client *client) +{ + return container_of(client, struct atmel_mci, dma.client); +} + +static enum dma_state_client atmci_dma_event(struct dma_client *client, + struct dma_chan *chan, enum dma_state state) +{ + struct atmel_mci *host; + enum dma_state_client ret = DMA_NAK; + + host = dma_client_to_atmel_mci(client); + + switch (state) { + case DMA_RESOURCE_AVAILABLE: + spin_lock_bh(&host->lock); + if (!host->dma.chan) { + host->dma.chan = chan; + ret = DMA_ACK; + } + spin_unlock_bh(&host->lock); + + if (ret == DMA_ACK) + dev_info(&host->pdev->dev, + "Using %s for DMA transfers\n", + chan->dev.bus_id); + break; + + case DMA_RESOURCE_REMOVED: + spin_lock_bh(&host->lock); + if (host->dma.chan == chan) { + host->dma.chan = NULL; + ret = DMA_ACK; + } + spin_unlock_bh(&host->lock); + + if (ret == DMA_ACK) + dev_info(&host->pdev->dev, + "Lost %s, falling back to PIO\n", + chan->dev.bus_id); + break; + + default: + break; + } + + + return ret; +} +#endif /* CONFIG_MMC_ATMELMCI_DMA */ + +static int __init atmci_init_slot(struct atmel_mci *host, + struct mci_slot_pdata *slot_data, unsigned int id, + u32 sdc_reg) +{ + struct mmc_host *mmc; + struct atmel_mci_slot *slot; + + mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); + if (!mmc) + return -ENOMEM; + + slot = mmc_priv(mmc); + slot->mmc = mmc; + slot->host = host; + slot->detect_pin = slot_data->detect_pin; + slot->wp_pin = slot_data->wp_pin; + slot->sdc_reg = sdc_reg; + + mmc->ops = &atmci_ops; + mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); + mmc->f_max = host->bus_hz / 2; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + if (slot_data->bus_width >= 4) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->max_hw_segs = 64; + mmc->max_phys_segs = 64; + mmc->max_req_size = 32768 * 512; + mmc->max_blk_size = 32768; + mmc->max_blk_count = 512; + + /* Assume card is present initially */ + set_bit(ATMCI_CARD_PRESENT, &slot->flags); + if (gpio_is_valid(slot->detect_pin)) { + if (gpio_request(slot->detect_pin, "mmc_detect")) { + 
dev_dbg(&mmc->class_dev, "no detect pin available\n"); + slot->detect_pin = -EBUSY; + } else if (gpio_get_value(slot->detect_pin)) { + clear_bit(ATMCI_CARD_PRESENT, &slot->flags); + } + } + + if (!gpio_is_valid(slot->detect_pin)) + mmc->caps |= MMC_CAP_NEEDS_POLL; + + if (gpio_is_valid(slot->wp_pin)) { + if (gpio_request(slot->wp_pin, "mmc_wp")) { + dev_dbg(&mmc->class_dev, "no WP pin available\n"); + slot->wp_pin = -EBUSY; + } + } + + host->slot[id] = slot; + mmc_add_host(mmc); + + if (gpio_is_valid(slot->detect_pin)) { + int ret; + + setup_timer(&slot->detect_timer, atmci_detect_change, + (unsigned long)slot); + + ret = request_irq(gpio_to_irq(slot->detect_pin), + atmci_detect_interrupt, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "mmc-detect", slot); + if (ret) { + dev_dbg(&mmc->class_dev, + "could not request IRQ %d for detect pin\n", + gpio_to_irq(slot->detect_pin)); + gpio_free(slot->detect_pin); + slot->detect_pin = -EBUSY; + } + } + + atmci_init_debugfs(slot); + + return 0; +} + +static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, + unsigned int id) +{ + /* Debugfs stuff is cleaned up by mmc core */ + + set_bit(ATMCI_SHUTDOWN, &slot->flags); + smp_wmb(); + + mmc_remove_host(slot->mmc); + + if (gpio_is_valid(slot->detect_pin)) { + int pin = slot->detect_pin; + + free_irq(gpio_to_irq(pin), slot); + del_timer_sync(&slot->detect_timer); + gpio_free(pin); + } + if (gpio_is_valid(slot->wp_pin)) + gpio_free(slot->wp_pin); + + slot->host->slot[id] = NULL; + mmc_free_host(slot->mmc); +} + static int __init atmci_probe(struct platform_device *pdev) { struct mci_platform_data *pdata; - struct atmel_mci *host; - struct mmc_host *mmc; - struct resource *regs; - int irq; - int ret; + struct atmel_mci *host; + struct resource *regs; + unsigned int nr_slots; + int irq; + int ret; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) @@ -1003,15 +1619,13 @@ static int __init atmci_probe(struct platform_device *pdev) if (irq < 0) return irq; - mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); - if (!mmc) + host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL); + if (!host) return -ENOMEM; - host = mmc_priv(mmc); host->pdev = pdev; - host->mmc = mmc; - host->detect_pin = pdata->detect_pin; - host->wp_pin = pdata->wp_pin; + spin_lock_init(&host->lock); + INIT_LIST_HEAD(&host->queue); host->mck = clk_get(&pdev->dev, "mci_clk"); if (IS_ERR(host->mck)) { @@ -1031,122 +1645,102 @@ static int __init atmci_probe(struct platform_device *pdev) host->mapbase = regs->start; - mmc->ops = &atmci_ops; - mmc->f_min = (host->bus_hz + 511) / 512; - mmc->f_max = host->bus_hz / 2; - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps |= MMC_CAP_4_BIT_DATA; - - mmc->max_hw_segs = 64; - mmc->max_phys_segs = 64; - mmc->max_req_size = 32768 * 512; - mmc->max_blk_size = 32768; - mmc->max_blk_count = 512; - - tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc); + tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); - ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); + ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, host); if (ret) goto err_request_irq; - /* Assume card is present if we don't have a detect pin */ - host->present = 1; - if (gpio_is_valid(host->detect_pin)) { - if (gpio_request(host->detect_pin, "mmc_detect")) { - dev_dbg(&mmc->class_dev, "no detect pin available\n"); - host->detect_pin = -1; - } else { - host->present = !gpio_get_value(host->detect_pin); - } - } +#ifdef CONFIG_MMC_ATMELMCI_DMA + if 
(pdata->dma_slave) { + struct dma_slave *slave = pdata->dma_slave; - if (!gpio_is_valid(host->detect_pin)) - mmc->caps |= MMC_CAP_NEEDS_POLL; + slave->tx_reg = regs->start + MCI_TDR; + slave->rx_reg = regs->start + MCI_RDR; - if (gpio_is_valid(host->wp_pin)) { - if (gpio_request(host->wp_pin, "mmc_wp")) { - dev_dbg(&mmc->class_dev, "no WP pin available\n"); - host->wp_pin = -1; - } + /* Try to grab a DMA channel */ + host->dma.client.event_callback = atmci_dma_event; + dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask); + host->dma.client.slave = slave; + + dma_async_client_register(&host->dma.client); + dma_async_client_chan_request(&host->dma.client); + } else { + dev_notice(&pdev->dev, "DMA not available, using PIO\n"); } +#endif /* CONFIG_MMC_ATMELMCI_DMA */ platform_set_drvdata(pdev, host); - mmc_add_host(mmc); - - if (gpio_is_valid(host->detect_pin)) { - setup_timer(&host->detect_timer, atmci_detect_change, - (unsigned long)host); - - ret = request_irq(gpio_to_irq(host->detect_pin), - atmci_detect_interrupt, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "mmc-detect", mmc); - if (ret) { - dev_dbg(&mmc->class_dev, - "could not request IRQ %d for detect pin\n", - gpio_to_irq(host->detect_pin)); - gpio_free(host->detect_pin); - host->detect_pin = -1; - } + /* We need at least one slot to succeed */ + nr_slots = 0; + ret = -ENODEV; + if (pdata->slot[0].bus_width) { + ret = atmci_init_slot(host, &pdata->slot[0], + MCI_SDCSEL_SLOT_A, 0); + if (!ret) + nr_slots++; + } + if (pdata->slot[1].bus_width) { + ret = atmci_init_slot(host, &pdata->slot[1], + MCI_SDCSEL_SLOT_B, 1); + if (!ret) + nr_slots++; } - dev_info(&mmc->class_dev, - "Atmel MCI controller at 0x%08lx irq %d\n", - host->mapbase, irq); + if (!nr_slots) + goto err_init_slot; - atmci_init_debugfs(host); + dev_info(&pdev->dev, + "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", + host->mapbase, irq, nr_slots); return 0; +err_init_slot: +#ifdef CONFIG_MMC_ATMELMCI_DMA + if (pdata->dma_slave) + dma_async_client_unregister(&host->dma.client); +#endif + free_irq(irq, host); err_request_irq: iounmap(host->regs); err_ioremap: clk_put(host->mck); err_clk_get: - mmc_free_host(mmc); + kfree(host); return ret; } static int __exit atmci_remove(struct platform_device *pdev) { - struct atmel_mci *host = platform_get_drvdata(pdev); + struct atmel_mci *host = platform_get_drvdata(pdev); + unsigned int i; platform_set_drvdata(pdev, NULL); - if (host) { - /* Debugfs stuff is cleaned up by mmc core */ - - if (gpio_is_valid(host->detect_pin)) { - int pin = host->detect_pin; - - /* Make sure the timer doesn't enable the interrupt */ - host->detect_pin = -1; - smp_wmb(); - - free_irq(gpio_to_irq(pin), host->mmc); - del_timer_sync(&host->detect_timer); - gpio_free(pin); - } - - mmc_remove_host(host->mmc); + for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { + if (host->slot[i]) + atmci_cleanup_slot(host->slot[i], i); + } - clk_enable(host->mck); - mci_writel(host, IDR, ~0UL); - mci_writel(host, CR, MCI_CR_MCIDIS); - mci_readl(host, SR); - clk_disable(host->mck); + clk_enable(host->mck); + mci_writel(host, IDR, ~0UL); + mci_writel(host, CR, MCI_CR_MCIDIS); + mci_readl(host, SR); + clk_disable(host->mck); - if (gpio_is_valid(host->wp_pin)) - gpio_free(host->wp_pin); +#ifdef CONFIG_MMC_ATMELMCI_DMA + if (host->dma.client.slave) + dma_async_client_unregister(&host->dma.client); +#endif - free_irq(platform_get_irq(pdev, 0), host->mmc); - iounmap(host->regs); + free_irq(platform_get_irq(pdev, 0), host); + iounmap(host->regs); - clk_put(host->mck); + 
clk_put(host->mck); + kfree(host); - mmc_free_host(host->mmc); - } return 0; } diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index ac4e506b4f88..5ea6b60fa377 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -257,7 +257,6 @@ struct e1000_adapter { struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; - spinlock_t stats_lock; /* prevent concurrent stats updates */ /* structs defined in e1000_hw.h */ struct e1000_hw hw; @@ -284,6 +283,8 @@ struct e1000_adapter { unsigned long led_status; unsigned int flags; + struct work_struct downshift_task; + struct work_struct update_phy_task; }; struct e1000_info { @@ -305,6 +306,7 @@ struct e1000_info { #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) #define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) #define FLAG_IS_ICH (1 << 9) #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) #define FLAG_IS_QUAD_PORT_A (1 << 12) @@ -385,6 +387,7 @@ extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); +extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state); extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index e21c9e0f3738..33a3ff17b5d0 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -432,6 +432,10 @@ static void e1000_get_regs(struct net_device *netdev, regs_buff[11] = er32(TIDV); regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + + /* ethtool doesn't use anything past this point, so all this + * code is likely legacy junk for apps that may or may not + * exist */ if (hw->phy.type == e1000_phy_m88) { e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); regs_buff[13] = (u32)phy_data; /* cable length */ @@ -447,7 +451,7 @@ static void e1000_get_regs(struct net_device *netdev, regs_buff[22] = adapter->phy_stats.receive_errors; regs_buff[23] = regs_buff[13]; /* mdix mode */ } - regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + regs_buff[21] = 0; /* was idle_errors */ e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); regs_buff[24] = (u32)phy_data; /* phy local receiver status */ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ @@ -529,6 +533,9 @@ static int e1000_set_eeprom(struct net_device *netdev, if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) return -EFAULT; + if (adapter->flags & FLAG_READ_ONLY_NVM) + return -EINVAL; + max_len = hw->nvm.word_size * 2; first_word = eeprom->offset >> 1; diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 9e38452a738c..bcd2bc477af2 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -58,6 +58,7 @@ #define ICH_FLASH_HSFCTL 0x0006 #define ICH_FLASH_FADDR 0x0008 #define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 #define ICH_FLASH_READ_COMMAND_TIMEOUT 500 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 @@ -150,6 +151,19 @@ union ich8_hws_flash_regacc { u16 regval; }; +/* ICH Flash Protected Region */ +union ich8_flash_protected_range { + struct ich8_pr { + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; 
/* 16:28 Protected Range Limit */ + u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ + } range; + u32 regval; +}; + static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); @@ -366,6 +380,9 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) return 0; } +static DEFINE_MUTEX(nvm_mutex); +static pid_t nvm_owner = -1; + /** * e1000_acquire_swflag_ich8lan - Acquire software control flag * @hw: pointer to the HW structure @@ -379,6 +396,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) u32 extcnf_ctrl; u32 timeout = PHY_CFG_TIMEOUT; + might_sleep(); + + if (!mutex_trylock(&nvm_mutex)) { + WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", + nvm_owner); + mutex_lock(&nvm_mutex); + } + nvm_owner = current->pid; + while (timeout) { extcnf_ctrl = er32(EXTCNF_CTRL); extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; @@ -393,6 +419,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) if (!timeout) { hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); + nvm_owner = -1; + mutex_unlock(&nvm_mutex); return -E1000_ERR_CONFIG; } @@ -414,6 +442,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) extcnf_ctrl = er32(EXTCNF_CTRL); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); + + nvm_owner = -1; + mutex_unlock(&nvm_mutex); } /** @@ -1284,6 +1315,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) * programming failed. */ if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ hw_dbg(hw, "Flash commit failed.\n"); e1000_release_swflag_ich8lan(hw); return ret_val; @@ -1374,6 +1406,49 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) } /** + * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only + * @hw: pointer to the HW structure + * + * To prevent malicious write/erase of the NVM, set it to be read-only + * so that the hardware ignores all write/erase cycles of the NVM via + * the flash control registers. The shadow-ram copy of the NVM will + * still be updated, however any updates to this copy will not stick + * across driver reloads. + **/ +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) +{ + union ich8_flash_protected_range pr0; + union ich8_hws_flash_status hsfsts; + u32 gfpreg; + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* Write-protect GbE Sector of NVM */ + pr0.regval = er32flash(ICH_FLASH_PR0); + pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; + pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); + pr0.range.wpe = true; + ew32flash(ICH_FLASH_PR0, pr0.regval); + + /* + * Lock down a subset of GbE Flash Control Registers, e.g. + * PR0 to prevent the write-protection from being lifted. + * Once FLOCKDN is set, the registers protected by it cannot + * be written until FLOCKDN is cleared by a hardware reset. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + hsfsts.hsf_status.flockdn = true; + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + e1000_release_swflag_ich8lan(hw); +} + +/** * e1000_write_flash_data_ich8lan - Writes bytes to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte/word to read. 
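Not part of the patch — a minimal, self-contained sketch of the trylock-then-warn locking pattern that the new e1000_acquire_swflag_ich8lan()/e1000_release_swflag_ich8lan() bodies above build around nvm_mutex. The names resource_mutex, resource_owner, resource_lock() and resource_unlock() are invented for illustration only.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Hypothetical lock guarding a slow shared resource, mirroring nvm_mutex. */
static DEFINE_MUTEX(resource_mutex);
static pid_t resource_owner = -1;

static void resource_lock(void)
{
        might_sleep();

        /*
         * Fast path is uncontended; on contention, log the current owner
         * before blocking so stalls are easy to attribute in the log.
         */
        if (!mutex_trylock(&resource_mutex)) {
                WARN(1, "resource contention, owned by pid %d\n",
                     resource_owner);
                mutex_lock(&resource_mutex);
        }
        resource_owner = current->pid;
}

static void resource_unlock(void)
{
        resource_owner = -1;
        mutex_unlock(&resource_mutex);
}

As in the release path above, the owner pid is cleared before the mutex is dropped, so a stale pid is never reported for an uncontended lock.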
@@ -1720,6 +1795,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(CTRL, (ctrl | E1000_CTRL_RST)); msleep(20); + /* release the swflag because it is not reset by hardware reset */ + e1000_release_swflag_ich8lan(hw); + ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) { /* diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index d266510c8a94..b81c4237b5d3 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -47,7 +47,7 @@ #include "e1000.h" -#define DRV_VERSION "0.3.3.3-k2" +#define DRV_VERSION "0.3.3.3-k6" char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -1115,6 +1115,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) writel(0, adapter->hw.hw_addr + rx_ring->tail); } +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, downshift_task); + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + /** * e1000_intr_msi - Interrupt Handler * @irq: interrupt number @@ -1139,7 +1147,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) */ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && (!(er32(STATUS) & E1000_STATUS_LU))) - e1000e_gig_downshift_workaround_ich8lan(hw); + schedule_work(&adapter->downshift_task); /* * 80003ES2LAN workaround-- For packet buffer work-around on @@ -1205,7 +1213,7 @@ static irqreturn_t e1000_intr(int irq, void *data) */ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && (!(er32(STATUS) & E1000_STATUS_LU))) - e1000e_gig_downshift_workaround_ich8lan(hw); + schedule_work(&adapter->downshift_task); /* * 80003ES2LAN workaround-- @@ -2592,8 +2600,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) /* Explicitly disable IRQ since the NIC can be in any state. */ e1000_irq_disable(adapter); - spin_lock_init(&adapter->stats_lock); - set_bit(__E1000_DOWN, &adapter->state); return 0; @@ -2912,6 +2918,21 @@ static int e1000_set_mac(struct net_device *netdev, void *p) return 0; } +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because we must acquire a + * semaphore to read the phy, which we could msleep while + * waiting for it, and we can't msleep in a timer. 
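Not part of the patch — a hedged sketch of the hard-IRQ-to-workqueue deferral that the new downshift_task above implements: the interrupt handler must not sleep, so it only queues the work, and the sleeping workaround runs later in process context. struct sketch_adapter, sketch_slow_work() and sketch_intr() are made-up names.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical device structure; only the work item matters here. */
struct sketch_adapter {
        struct work_struct slow_task;
        /* ... hardware state ... */
};

/* Process context: sleeping (register polling, msleep) is allowed here. */
static void sketch_slow_work(struct work_struct *work)
{
        struct sketch_adapter *adapter =
                container_of(work, struct sketch_adapter, slow_task);

        /* ... e1000e_gig_downshift_workaround_ich8lan()-style work ... */
        (void)adapter;
}

/* Hard-IRQ context: never call the sleeping helper directly, just queue it. */
static irqreturn_t sketch_intr(int irq, void *data)
{
        struct sketch_adapter *adapter = data;

        schedule_work(&adapter->slow_task);
        return IRQ_HANDLED;
}

/* At probe time: INIT_WORK(&adapter->slow_task, sketch_slow_work); */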
+ **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, update_phy_task); + e1000_get_phy_info(&adapter->hw); +} + /* * Need to wait a few seconds after link up to get diagnostic information from * the phy @@ -2919,7 +2940,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) static void e1000_update_phy_info(unsigned long data) { struct e1000_adapter *adapter = (struct e1000_adapter *) data; - e1000_get_phy_info(&adapter->hw); + schedule_work(&adapter->update_phy_task); } /** @@ -2930,10 +2951,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - unsigned long irq_flags; - u16 phy_tmp; - -#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF /* * Prevent stats update while adapter is being reset, or if the pci @@ -2944,14 +2961,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter) if (pci_channel_offline(pdev)) return; - spin_lock_irqsave(&adapter->stats_lock, irq_flags); - - /* - * these counters are modified from e1000_adjust_tbi_stats, - * called from the interrupt context, so they must only - * be written while holding adapter->stats_lock - */ - adapter->stats.crcerrs += er32(CRCERRS); adapter->stats.gprc += er32(GPRC); adapter->stats.gorc += er32(GORCL); @@ -3022,21 +3031,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter) /* Tx Dropped needs to be maintained elsewhere */ - /* Phy Stats */ - if (hw->phy.media_type == e1000_media_type_copper) { - if ((adapter->link_speed == SPEED_1000) && - (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; - adapter->phy_stats.idle_errors += phy_tmp; - } - } - /* Management Stats */ adapter->stats.mgptc += er32(MGTPTC); adapter->stats.mgprc += er32(MGTPRC); adapter->stats.mgpdc += er32(MGTPDC); - - spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); } /** @@ -3048,10 +3046,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; int ret_val; - unsigned long irq_flags; - - - spin_lock_irqsave(&adapter->stats_lock, irq_flags); if ((er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { @@ -3082,8 +3076,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) phy->stat1000 = 0; phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); } - - spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); } static void e1000_print_link_info(struct e1000_adapter *adapter) @@ -4467,6 +4459,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->bd_number = cards_found++; + e1000e_check_options(adapter); + /* setup adapter struct */ err = e1000_sw_init(adapter); if (err) @@ -4482,6 +4476,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_hw_init; + if ((adapter->flags & FLAG_IS_ICH) && + (adapter->flags & FLAG_READ_ONLY_NVM)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + hw->mac.ops.get_bus_info(&adapter->hw); adapter->hw.phy.autoneg_wait_to_complete = 0; @@ -4572,8 +4570,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); - - e1000e_check_options(adapter); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); /* Initialize link parameters. 
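Not part of the patch — the same deferral applied to timers, which is what e1000e_update_phy_task() above exists for: timer callbacks run in softirq context and cannot msleep or take the PHY semaphore, so the timer only schedules the work. struct sketch_dev and the sketch_phy_* names are invented; the timer API shown is the 2008-era one used elsewhere in this patch.

#include <linux/timer.h>
#include <linux/workqueue.h>

/* Hypothetical device with a periodic timer and a sleeping follow-up. */
struct sketch_dev {
        struct timer_list phy_timer;
        struct work_struct phy_task;
};

/* Process context: acquiring the PHY semaphore and msleep() are fine here. */
static void sketch_phy_task(struct work_struct *work)
{
        struct sketch_dev *dev =
                container_of(work, struct sketch_dev, phy_task);

        /* ... e1000_get_phy_info()-style register reads ... */
        (void)dev;
}

/* Timer (softirq) context: must not sleep, so only queue the work. */
static void sketch_phy_timer(unsigned long data)
{
        struct sketch_dev *dev = (struct sketch_dev *)data;

        schedule_work(&dev->phy_task);
}

/*
 * At init time:
 *      INIT_WORK(&dev->phy_task, sketch_phy_task);
 *      setup_timer(&dev->phy_timer, sketch_phy_timer, (unsigned long)dev);
 *      mod_timer(&dev->phy_timer, jiffies + 2 * HZ);
 */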
User can change them with ethtool */ adapter->hw.mac.autoneg = 1; diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index ed912e023a72..d91dbf7ba434 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c @@ -133,6 +133,15 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); */ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); +/* + * Write Protect NVM + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); + struct e1000_option { enum { enable_option, range_option, list_option } type; const char *name; @@ -388,4 +397,25 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) opt.def); } } + { /* Write-protect NVM */ + const struct e1000_option opt = { + .type = enable_option, + .name = "Write-protect NVM", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (adapter->flags & FLAG_IS_ICH) { + if (num_WriteProtectNVM > bd) { + unsigned int write_protect_nvm = WriteProtectNVM[bd]; + e1000_validate_option(&write_protect_nvm, &opt, + adapter); + if (write_protect_nvm) + adapter->flags |= FLAG_READ_ONLY_NVM; + } else { + if (opt.def) + adapter->flags |= FLAG_READ_ONLY_NVM; + } + } + } } diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c index f6c45288d0e7..87e37bc39145 100644 --- a/drivers/net/wireless/ath9k/core.c +++ b/drivers/net/wireless/ath9k/core.c @@ -294,8 +294,6 @@ static int ath_stop(struct ath_softc *sc) * hardware is gone (invalid). */ - if (!sc->sc_invalid) - ath9k_hw_set_interrupts(ah, 0); ath_draintxq(sc, false); if (!sc->sc_invalid) { ath_stoprecv(sc); @@ -797,6 +795,12 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan) if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) sc->sc_imask |= ATH9K_INT_CST; + /* Note: We disable MIB interrupts for now as we don't yet + * handle processing ANI, otherwise you will get an interrupt + * storm after about 7 hours of usage making the system unusable + * with huge latency. Once we do have ANI processing included + * we can re-enable this interrupt. */ +#if 0 /* * Enable MIB interrupts when there are hardware phy counters. * Note we only do this (at the moment) for station mode. @@ -804,6 +808,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan) if (ath9k_hw_phycounters(ah) && ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) sc->sc_imask |= ATH9K_INT_MIB; +#endif /* * Some hardware processes the TIM IE and fires an * interrupt when the TIM bit is set. 
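Not part of the patch — a sketch of the per-board module option handling that the WriteProtectNVM hunk above follows: if the user supplied a value for this board it is used, otherwise the compiled-in default (enabled) applies. The ProtectNVM array, num_ProtectNVM and board_write_protected() are invented; the real driver goes through its E1000_PARAM/e1000_validate_option() helpers instead of raw module_param_array().

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

#define MAX_BOARDS 32

/* -1 means "not set on the command line" for that board. */
static int ProtectNVM[MAX_BOARDS] = { [0 ... MAX_BOARDS - 1] = -1 };
static unsigned int num_ProtectNVM;
module_param_array(ProtectNVM, int, &num_ProtectNVM, 0);
MODULE_PARM_DESC(ProtectNVM, "Write-protect NVM (0/1), one value per board");

static bool board_write_protected(unsigned int bd)
{
        /* Value given on the command line for this board? */
        if (bd < num_ProtectNVM && ProtectNVM[bd] >= 0)
                return ProtectNVM[bd] != 0;

        /* Otherwise fall back to the default: protection enabled. */
        return true;
}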
For hardware @@ -1336,6 +1341,8 @@ void ath_deinit(struct ath_softc *sc) DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__); + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); ath_stop(sc); if (!sc->sc_invalid) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h index 4ee695b76b88..2f84093331ee 100644 --- a/drivers/net/wireless/ath9k/core.h +++ b/drivers/net/wireless/ath9k/core.h @@ -974,7 +974,6 @@ struct ath_softc { u32 sc_keymax; /* size of key cache */ DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */ u8 sc_splitmic; /* split TKIP MIC keys */ - int sc_keytype; /* RX */ struct list_head sc_rxbuf; diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c index 99badf1404c3..acebdf1d20a8 100644 --- a/drivers/net/wireless/ath9k/main.c +++ b/drivers/net/wireless/ath9k/main.c @@ -206,8 +206,6 @@ static int ath_key_config(struct ath_softc *sc, if (!ret) return -EIO; - if (mac) - sc->sc_keytype = hk.kv_type; return 0; } @@ -778,7 +776,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw, case DISABLE_KEY: ath_key_delete(sc, key); clear_bit(key->keyidx, sc->sc_keymap); - sc->sc_keytype = ATH9K_CIPHER_CLR; break; default: ret = -EINVAL; @@ -1414,10 +1411,17 @@ static void ath_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_softc *sc = hw->priv; + enum ath9k_int status; - if (pdev->irq) + if (pdev->irq) { + ath9k_hw_set_interrupts(sc->sc_ah, 0); + /* clear the ISR */ + ath9k_hw_getisr(sc->sc_ah, &status); + sc->sc_invalid = 1; free_irq(pdev->irq, sc); + } ath_detach(sc); + pci_iounmap(pdev, sc->mem); pci_release_region(pdev, 0); pci_disable_device(pdev); diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index 550129f717e2..8b332e11a656 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c @@ -315,11 +315,11 @@ static int ath_tx_prepare(struct ath_softc *sc, txctl->keyix = tx_info->control.hw_key->hw_key_idx; txctl->frmlen += tx_info->control.icv_len; - if (sc->sc_keytype == ATH9K_CIPHER_WEP) + if (tx_info->control.hw_key->alg == ALG_WEP) txctl->keytype = ATH9K_KEY_TYPE_WEP; - else if (sc->sc_keytype == ATH9K_CIPHER_TKIP) + else if (tx_info->control.hw_key->alg == ALG_TKIP) txctl->keytype = ATH9K_KEY_TYPE_TKIP; - else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM) + else if (tx_info->control.hw_key->alg == ALG_CCMP) txctl->keytype = ATH9K_KEY_TYPE_AES; } diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index da8b7433e3a6..a60ae86bd5c9 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -58,6 +58,7 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, /* ZD1211B */ { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 9c718583a237..77baff022f71 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/topology.h> @@ -484,6 +485,21 @@ pci_mmap_legacy_mem(struct 
kobject *kobj, struct bin_attribute *attr, #endif /* HAVE_PCI_LEGACY */ #ifdef HAVE_PCI_MMAP + +static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) +{ + unsigned long nr, start, size; + + nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + start = vma->vm_pgoff; + size = pci_resource_len(pdev, resno) >> PAGE_SHIFT; + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, start, start+nr, pci_name(pdev), resno, size); + return 0; +} + /** * pci_mmap_resource - map a PCI resource into user memory space * @kobj: kobject for mapping @@ -510,6 +526,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; + if (!pci_mmap_fits(pdev, i, vma)) + return -EINVAL; + /* pci_mmap_page_range() expects the same kind of entry as coming * from /proc/bus/pci/ which is a "user visible" value. If this is * different from the resource itself, arch will do necessary fixup. diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 9a7c9e1408a4..851f5b83cdbc 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -527,7 +527,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) */ pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, ®32); - if (!(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force)) { + if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { printk("Pre-1.1 PCIe device detected, " "disable ASPM for %s. It can be enabled forcedly" " with 'pcie_aspm=force'\n", pci_name(pdev)); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 3b3b5f178797..4edfc4731bd4 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -162,7 +162,7 @@ EXPORT_SYMBOL(pci_find_slot); * time. */ struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from) + struct pci_dev *from) { struct pci_dev *pdev; @@ -263,7 +263,7 @@ static int match_pci_dev_by_id(struct device *dev, void *data) * this file. */ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, - const struct pci_dev *from) + struct pci_dev *from) { struct device *dev; struct device *dev_start = NULL; @@ -303,7 +303,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, */ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from) + struct pci_dev *from) { struct pci_dev *pdev; struct pci_device_id *id; diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 4174d9656e35..34c83d3ca0fa 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -427,6 +427,18 @@ static int pcmcia_device_probe(struct device * dev) p_drv = to_pcmcia_drv(dev->driver); s = p_dev->socket; + /* The PCMCIA code passes the match data in via dev->driver_data + * which is an ugly hack. Once the driver probe is called it may + * and often will overwrite the match data so we must save it first + * + * handle pseudo multifunction devices: + * there are at most two pseudo multifunction devices. + * if we're matching against the first, schedule a + * call which will then check whether there are two + * pseudo devices, and if not, add the second one. 
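Not part of the patch — a commented restatement of the bounds check that pci_mmap_fits() above adds, with a small worked example. Everything is counted in pages, and vm_pgoff is the offset into the BAR (not a physical pfn) because the sysfs resource files are offset-based. The helper name bar_mmap_fits() is invented.

#include <linux/mm.h>
#include <linux/pci.h>

static int bar_mmap_fits(struct pci_dev *pdev, int bar,
                         struct vm_area_struct *vma)
{
        unsigned long req_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long first_page = vma->vm_pgoff;
        unsigned long bar_pages = pci_resource_len(pdev, bar) >> PAGE_SHIFT;

        /*
         * Example: a 64 KiB BAR is 16 pages; asking for 4 pages starting
         * at page 14 is rejected because 16 - 14 < 4.
         */
        return first_page < bar_pages && bar_pages - first_page >= req_pages;
}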
+ */ + did = p_dev->dev.driver_data; + ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id, p_drv->drv.name); @@ -455,21 +467,14 @@ static int pcmcia_device_probe(struct device * dev) goto put_module; } - /* handle pseudo multifunction devices: - * there are at most two pseudo multifunction devices. - * if we're matching against the first, schedule a - * call which will then check whether there are two - * pseudo devices, and if not, add the second one. - */ - did = p_dev->dev.driver_data; if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) pcmcia_add_device_later(p_dev->socket, 0); - put_module: +put_module: if (ret) module_put(p_drv->owner); - put_dev: +put_dev: if (ret) put_device(dev); return (ret); diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index f118252f3a9f..52e2743b04ec 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -422,6 +422,12 @@ done: return err; } +static int rtc_dev_fasync(int fd, struct file *file, int on) +{ + struct rtc_device *rtc = file->private_data; + return fasync_helper(fd, file, on, &rtc->async_queue); +} + static int rtc_dev_release(struct inode *inode, struct file *file) { struct rtc_device *rtc = file->private_data; @@ -434,16 +440,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file) if (rtc->ops->release) rtc->ops->release(rtc->dev.parent); + if (file->f_flags & FASYNC) + rtc_dev_fasync(-1, file, 0); + clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return 0; } -static int rtc_dev_fasync(int fd, struct file *file, int on) -{ - struct rtc_device *rtc = file->private_data; - return fasync_helper(fd, file, on, &rtc->async_queue); -} - static const struct file_operations rtc_dev_fops = { .owner = THIS_MODULE, .llseek = no_llseek, diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 1679e2f91c94..a0b6b46e7466 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, { char s[80]; - sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); - + sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); switch (irq_ptr->qib.qfmt) { case QDIO_QETH_QFMT: - sprintf(s + strlen(s), "OSADE "); + sprintf(s + strlen(s), "OSA "); break; case QDIO_ZFCP_QFMT: sprintf(s + strlen(s), "ZFCP "); break; case QDIO_IQDIO_QFMT: - sprintf(s + strlen(s), "HiperSockets "); + sprintf(s + strlen(s), "HS "); break; } - sprintf(s + strlen(s), "using: "); - - if (!is_thinint_irq(irq_ptr)) - sprintf(s + strlen(s), "no"); - sprintf(s + strlen(s), "AdapterInterrupts "); - if (!(irq_ptr->sch_token != 0)) - sprintf(s + strlen(s), "no"); - sprintf(s + strlen(s), "QEBSM "); - if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) - sprintf(s + strlen(s), "no"); - sprintf(s + strlen(s), "OutboundPCI "); - if (!css_general_characteristics.aif_tdd) - sprintf(s + strlen(s), "no"); - sprintf(s + strlen(s), "TDD\n"); - printk(KERN_INFO "qdio: %s", s); - - memset(s, 0, sizeof(s)); - sprintf(s, "%s SIGA required: ", cdev->dev.bus_id); - if (irq_ptr->siga_flag.input) - sprintf(s + strlen(s), "Read "); - if (irq_ptr->siga_flag.output) - sprintf(s + strlen(s), "Write "); - if (irq_ptr->siga_flag.sync) - sprintf(s + strlen(s), "Sync "); - if (!irq_ptr->siga_flag.no_sync_ti) - sprintf(s + strlen(s), "SyncAI "); - if (!irq_ptr->siga_flag.no_sync_out_ti) - sprintf(s + strlen(s), "SyncOutAI "); - if (!irq_ptr->siga_flag.no_sync_out_pci) - sprintf(s + strlen(s), 
"SyncOutPCI"); + sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); + sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); + sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); + sprintf(s + strlen(s), "PCI:%d ", + (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); + sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); + sprintf(s + strlen(s), "SIGA:"); + sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); + sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); + sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); + sprintf(s + strlen(s), "%s", + (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); + sprintf(s + strlen(s), "%s", + (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); + sprintf(s + strlen(s), "%s", + (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); sprintf(s + strlen(s), "\n"); - printk(KERN_INFO "qdio: %s", s); + printk(KERN_INFO "%s", s); } int __init qdio_setup_init(void) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 45a3b93eed57..bf41887cdd65 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1834,7 +1834,6 @@ clear_risc_ints: WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); } spin_unlock_irq(&ha->hardware_lock); - ha->isp_ops->enable_intrs(ha); fail: return ret; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 26afe44265c7..6d0f0e5f2827 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1740,6 +1740,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto probe_failed; + ha->isp_ops->enable_intrs(ha); + scsi_scan_host(host); qla2x00_alloc_sysfs_attr(ha); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 4a1cf6377f6c..905350896725 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -914,6 +914,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, ds[i].d_count = sg_dma_len(s); } sg_count -= n; + sg = s; } } else { cmd->dataseg[0].d_base = 0; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ff5d56b3ee4d..62307bd794a9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd) void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; - int this_count = scsi_bufflen(cmd); + int this_count; struct request_queue *q = cmd->device->request_queue; struct request *req = cmd->request; int error = 0; @@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) */ if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) return; + this_count = blk_rq_bytes(req); /* good_bytes = 0, or (inclusive) there were leftovers and * result = 0, so scsi_end_request couldn't retry. diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 3a6da80b081c..61fb8b6d19af 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -131,7 +131,8 @@ struct atmel_uart_char { struct atmel_uart_port { struct uart_port uart; /* uart */ struct clk *clk; /* uart clock */ - unsigned short suspended; /* is port suspended? 
*/ + int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */ + u32 backup_imr; /* IMR saved during suspend */ int break_active; /* break being received */ short use_dma_rx; /* enable PDC receiver */ @@ -984,8 +985,15 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state, * This is called on uart_open() or a resume event. */ clk_enable(atmel_port->clk); + + /* re-enable interrupts if we disabled some on suspend */ + UART_PUT_IER(port, atmel_port->backup_imr); break; case 3: + /* Back up the interrupt mask and disable all interrupts */ + atmel_port->backup_imr = UART_GET_IMR(port); + UART_PUT_IDR(port, -1); + /* * Disable the peripheral clock for this serial port. * This is called on uart_close() or a suspend event. @@ -1475,13 +1483,12 @@ static int atmel_serial_suspend(struct platform_device *pdev, cpu_relax(); } - if (device_may_wakeup(&pdev->dev) - && !atmel_serial_clk_will_stop()) - enable_irq_wake(port->irq); - else { - uart_suspend_port(&atmel_uart, port); - atmel_port->suspended = 1; - } + /* we can not wake up if we're running on slow clock */ + atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); + if (atmel_serial_clk_will_stop()) + device_set_wakeup_enable(&pdev->dev, 0); + + uart_suspend_port(&atmel_uart, port); return 0; } @@ -1491,11 +1498,8 @@ static int atmel_serial_resume(struct platform_device *pdev) struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - if (atmel_port->suspended) { - uart_resume_port(&atmel_uart, port); - atmel_port->suspended = 0; - } else - disable_irq_wake(port->irq); + uart_resume_port(&atmel_uart, port); + device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); return 0; } @@ -1513,6 +1517,8 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); port = &atmel_ports[pdev->id]; + port->backup_imr = 0; + atmel_init_port(port, pdev); if (!atmel_use_dma_rx(&port->uart)) { diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index c4eaacd6e553..b872bfaf4bd2 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c @@ -427,7 +427,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } - if (t->speed_hz < orion_spi->min_speed) { + if (t->speed_hz && t->speed_hz < orion_spi->min_speed) { dev_err(&spi->dev, "message rejected : " "device min speed (%d Hz) exceeds " diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 0e53354c1cfe..d47d3636227f 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -49,7 +49,7 @@ MODULE_ALIAS("platform:pxa2xx-spi"); #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) -#define IS_DMA_ALIGNED(x) (((x) & 0x07) == 0) +#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0) #define MAX_DMA_LEN 8191 /* @@ -896,7 +896,7 @@ static void pump_transfers(unsigned long data) || transfer->rx_dma || transfer->tx_dma) { dev_err(&drv_data->pdev->dev, "pump_transfers: mapped transfer length " - "of %lu is greater than %d\n", + "of %u is greater than %d\n", transfer->len, MAX_DMA_LEN); message->status = -EINVAL; giveback(drv_data); diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 87ab2443e66d..0ffabf5c0b60 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -471,6 +471,7 @@ static int ssb_devices_register(struct ssb_bus *bus) #endif break; case SSB_BUSTYPE_SSB: + 
dev->dma_mask = &dev->coherent_dma_mask; break; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8abd4e59bf4a..8ab389dca2b9 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1876,7 +1876,8 @@ int usb_add_hcd(struct usb_hcd *hcd, * with IRQF_SHARED. As usb_hcd_irq() will always disable * interrupts we can remove it here. */ - irqflags &= ~IRQF_DISABLED; + if (irqflags & IRQF_SHARED) + irqflags &= ~IRQF_DISABLED; snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", hcd->driver->description, hcd->self.busnum); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 6a5cb018383d..d99963873e37 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2683,35 +2683,17 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, USB_PORT_STAT_C_ENABLE); #endif - /* Try to use the debounce delay for protection against - * port-enable changes caused, for example, by EMI. - */ - if (portchange & (USB_PORT_STAT_C_CONNECTION | - USB_PORT_STAT_C_ENABLE)) { - status = hub_port_debounce(hub, port1); - if (status < 0) { - if (printk_ratelimit()) - dev_err (hub_dev, "connect-debounce failed, " - "port %d disabled\n", port1); - portstatus &= ~USB_PORT_STAT_CONNECTION; - } else { - portstatus = status; - } - } - /* Try to resuscitate an existing device */ udev = hdev->children[port1-1]; if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && udev->state != USB_STATE_NOTATTACHED) { - usb_lock_device(udev); if (portstatus & USB_PORT_STAT_ENABLE) { status = 0; /* Nothing to do */ - } else if (!udev->persist_enabled) { - status = -ENODEV; /* Mustn't resuscitate */ #ifdef CONFIG_USB_SUSPEND - } else if (udev->state == USB_STATE_SUSPENDED) { + } else if (udev->state == USB_STATE_SUSPENDED && + udev->persist_enabled) { /* For a suspended device, treat this as a * remote wakeup event. 
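Not part of the patch — a sketch of the interrupt-flag fix-up that the usb_add_hcd() hunk above narrows: IRQF_DISABLED is only meaningful for the first handler on an interrupt line, so it is dropped only when the line is actually shared. request_hcd_irq() and the "hcd-sketch" name are invented.

#include <linux/interrupt.h>

static int request_hcd_irq(unsigned int irq, irq_handler_t handler,
                           unsigned long irqflags, void *dev)
{
        /* On a shared line IRQF_DISABLED cannot be honoured reliably. */
        if (irqflags & IRQF_SHARED)
                irqflags &= ~IRQF_DISABLED;

        return request_irq(irq, handler, irqflags, "hcd-sketch", dev);
}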
*/ @@ -2726,7 +2708,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, #endif } else { - status = usb_reset_device(udev); + status = -ENODEV; /* Don't resuscitate */ } usb_unlock_device(udev); @@ -2741,6 +2723,19 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, usb_disconnect(&hdev->children[port1-1]); clear_bit(port1, hub->change_bits); + if (portchange & (USB_PORT_STAT_C_CONNECTION | + USB_PORT_STAT_C_ENABLE)) { + status = hub_port_debounce(hub, port1); + if (status < 0) { + if (printk_ratelimit()) + dev_err(hub_dev, "connect-debounce failed, " + "port %d disabled\n", port1); + portstatus &= ~USB_PORT_STAT_CONNECTION; + } else { + portstatus = status; + } + } + /* Return now if debouncing failed or nothing is connected */ if (!(portstatus & USB_PORT_STAT_CONNECTION)) { @@ -2748,7 +2743,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 && !(portstatus & (1 << USB_PORT_FEAT_POWER))) set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); - + if (portstatus & USB_PORT_STAT_ENABLE) goto done; return; diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c index 1cfccf102a2d..45ad556169f1 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.c +++ b/drivers/usb/gadget/fsl_usb2_udc.c @@ -223,7 +223,7 @@ static int dr_controller_setup(struct fsl_udc *udc) fsl_writel(tmp, &dr_regs->endpointlistaddr); VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x", - (int)udc->ep_qh, (int)tmp, + udc->ep_qh, (int)tmp, fsl_readl(&dr_regs->endpointlistaddr)); /* Config PHY interface */ diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 574c53831a05..bb54cca4c543 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c @@ -787,7 +787,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_TIPB, OMAP_DMA_AMODE_CONSTANT, - (unsigned long) io_v2p(UDC_DATA_DMA), + UDC_DATA_DMA, 0, 0); } } else { @@ -804,7 +804,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_TIPB, OMAP_DMA_AMODE_CONSTANT, - (unsigned long) io_v2p(UDC_DATA_DMA), + UDC_DATA_DMA, 0, 0); /* EMIFF or SDRC */ omap_set_dma_dest_burst_mode(ep->lch, diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index d9d53f289caf..8409e0705d63 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -145,16 +145,6 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr, return -ETIMEDOUT; } -static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr, - u32 mask, u32 done, int usec) -{ - int error = handshake(ehci, ptr, mask, done, usec); - if (error) - ehci_to_hcd(ehci)->state = HC_STATE_HALT; - - return error; -} - /* force HC to halt state from unknown (EHCI spec section 2.3) */ static int ehci_halt (struct ehci_hcd *ehci) { @@ -173,6 +163,22 @@ static int ehci_halt (struct ehci_hcd *ehci) STS_HALT, STS_HALT, 16 * 125); } +static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr, + u32 mask, u32 done, int usec) +{ + int error; + + error = handshake(ehci, ptr, mask, done, usec); + if (error) { + ehci_halt(ehci); + ehci_to_hcd(ehci)->state = HC_STATE_HALT; + ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n", + ptr, mask, done, error); + } + + return error; +} + /* put TDI/ARC silicon into EHCI mode */ static void tdi_reset (struct ehci_hcd *ehci) { 
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index b7853c8bac0f..4a0c5a78b2ed 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -437,6 +437,9 @@ static int enable_periodic (struct ehci_hcd *ehci) u32 cmd; int status; + if (ehci->periodic_sched++) + return 0; + /* did clearing PSE did take effect yet? * takes effect only at frame boundaries... */ @@ -461,6 +464,9 @@ static int disable_periodic (struct ehci_hcd *ehci) u32 cmd; int status; + if (--ehci->periodic_sched) + return 0; + /* did setting PSE not take effect yet? * takes effect only at frame boundaries... */ @@ -544,13 +550,10 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) : (qh->usecs * 8); /* maybe enable periodic schedule processing */ - if (!ehci->periodic_sched++) - return enable_periodic (ehci); - - return 0; + return enable_periodic(ehci); } -static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) +static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) { unsigned i; unsigned period; @@ -586,9 +589,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) qh_put (qh); /* maybe turn off periodic schedule */ - ehci->periodic_sched--; - if (!ehci->periodic_sched) - (void) disable_periodic (ehci); + return disable_periodic(ehci); } static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) @@ -1562,9 +1563,7 @@ itd_link_urb ( urb->hcpriv = NULL; timer_action (ehci, TIMER_IO_WATCHDOG); - if (unlikely (!ehci->periodic_sched++)) - return enable_periodic (ehci); - return 0; + return enable_periodic(ehci); } #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) @@ -1642,7 +1641,7 @@ itd_complete ( ehci_urb_done(ehci, urb, 0); retval = true; urb = NULL; - ehci->periodic_sched--; + (void) disable_periodic(ehci); ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; if (unlikely (list_empty (&stream->td_list))) { @@ -1951,9 +1950,7 @@ sitd_link_urb ( urb->hcpriv = NULL; timer_action (ehci, TIMER_IO_WATCHDOG); - if (!ehci->periodic_sched++) - return enable_periodic (ehci); - return 0; + return enable_periodic(ehci); } /*-------------------------------------------------------------------------*/ @@ -2019,7 +2016,7 @@ sitd_complete ( ehci_urb_done(ehci, urb, 0); retval = true; urb = NULL; - ehci->periodic_sched--; + (void) disable_periodic(ehci); ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; if (list_empty (&stream->td_list)) { @@ -2243,8 +2240,7 @@ restart: if (unlikely (modified)) { if (likely(ehci->periodic_sched > 0)) goto restart; - /* maybe we can short-circuit this scan! 
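Not part of the patch — a stand-alone sketch of the reference-counted gating that enable_periodic()/disable_periodic() above now implement via ehci->periodic_sched: the hardware is only touched on the 0→1 and 1→0 transitions, and every caller must pair its enable with a disable (which is why itd_complete()/sitd_complete() now call disable_periodic() instead of decrementing the counter by hand). struct sched_gate and the gate_* names are invented.

struct sched_gate {
        unsigned int users;
};

static int gate_enable(struct sched_gate *g)
{
        if (g->users++)
                return 0;               /* already running */

        /* ... program the hardware on the 0 -> 1 transition ... */
        return 0;
}

static int gate_disable(struct sched_gate *g)
{
        if (--g->users)
                return 0;               /* still in use */

        /* ... stop the hardware on the 1 -> 0 transition ... */
        return 0;
}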
*/ - disable_periodic(ehci); + /* short-circuit this scan */ now_uframe = clock; break; } diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index a0017486ad4e..58b2b8fc9439 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -9,6 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options" # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller config USB_MUSB_HDRC depends on (USB || USB_GADGET) && HAVE_CLK + depends on !SUPERH select TWL4030_USB if MACH_OMAP_3430SDP tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' help diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index c5b8f0296fcf..128e949db47c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -100,8 +100,8 @@ #include <linux/io.h> #ifdef CONFIG_ARM -#include <asm/arch/hardware.h> -#include <asm/arch/memory.h> +#include <mach/hardware.h> +#include <mach/memory.h> #include <asm/mach-types.h> #endif diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 298b22e6ad0d..9d2dcb121c5e 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -35,8 +35,8 @@ #include <linux/io.h> #include <asm/mach-types.h> -#include <asm/arch/hardware.h> -#include <asm/arch/mux.h> +#include <mach/hardware.h> +#include <mach/mux.h> #include "musb_core.h" #include "omap2430.h" diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h index 786a62071f72..dc7670718cd2 100644 --- a/drivers/usb/musb/omap2430.h +++ b/drivers/usb/musb/omap2430.h @@ -11,8 +11,8 @@ #define __MUSB_OMAP243X_H__ #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) -#include <asm/arch/hardware.h> -#include <asm/arch/usb.h> +#include <mach/hardware.h> +#include <mach/usb.h> /* * OMAP2430-specific definitions diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c index 442cba69cce5..1279553381e3 100644 --- a/drivers/usb/serial/cp2101.c +++ b/drivers/usb/serial/cp2101.c @@ -72,6 +72,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ + { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ @@ -83,6 +84,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ @@ -93,6 +95,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { } /* Terminating Entry */ }; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 984f6eff4c47..3dc93b542b30 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ 
b/drivers/usb/serial/ftdi_sio.c @@ -654,6 +654,9 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 382265bba969..8a5b6df3a976 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -750,6 +750,7 @@ #define PAPOUCH_VID 0x5050 /* Vendor ID */ #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ +#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ /* * ACG Identification Technologies GmbH products (http://www.acg.de/). @@ -838,6 +839,10 @@ /* Rig Expert Ukraine devices */ #define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */ +/* Domintell products http://www.domintell.com */ +#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */ +#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */ + /* Commands */ #define FTDI_SIO_RESET 0 /* Reset the port */ #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9f9cd36455f4..73f8277f88f2 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -218,6 +218,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 #define ZTE_PRODUCT_MF628 0x0015 +#define ZTE_PRODUCT_CDMA_TECH 0xfffe static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -347,6 +348,7 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 706033753adb..ea1a103c99be 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -14,7 +14,7 @@ Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> */ -#define DRIVER_VERSION "v.1.2.13a" +#define DRIVER_VERSION "v.1.3.2" #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" @@ -30,9 +30,6 @@ #define SWIMS_USB_REQUEST_SetPower 0x00 #define SWIMS_USB_REQUEST_SetNmea 0x07 -#define SWIMS_USB_REQUEST_SetMode 0x0B -#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A -#define SWIMS_SET_MODE_Modem 0x0001 /* per port private data */ #define N_IN_URB 4 @@ -163,7 +160,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ - { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ + { USB_DEVICE(0x03f0, 0x1b1d) }, /* HP ev2200 a.k.a MC5720 */ { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ @@ -175,6 +172,8 @@ static struct usb_device_id id_table [] = { /* 
Sierra Wireless Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ + { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless Device */ + { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ @@ -187,6 +186,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ + { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */ { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */ @@ -204,6 +204,8 @@ static struct usb_device_id id_table [] = { /* Sierra Wireless Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless Device */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)}, + /* Sierra Wireless Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index e39c779e4160..9a3e495c769c 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1744,7 +1744,7 @@ static int ti_download_firmware(struct ti_device *tdev, int type) if (buffer) { memcpy(buffer, fw_p->data, fw_p->size); memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size); - ti_do_download(dev, pipe, buffer, fw_p->size); + status = ti_do_download(dev, pipe, buffer, fw_p->size); kfree(buffer); } release_firmware(fw_p); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b157c48e8b78..4f7f9e3ae0a4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -733,7 +733,9 @@ int usb_serial_probe(struct usb_interface *interface, ((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) && (le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) || ((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) && - (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID))) { + (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID)) || + ((le16_to_cpu(dev->descriptor.idVendor) == SIEMENS_VENDOR_ID) && + (le16_to_cpu(dev->descriptor.idProduct) == SIEMENS_PRODUCT_ID_EF81))) { if (interface != dev->actconfig->interface[0]) { /* check out the endpoints of the other interface*/ iface_desc = dev->actconfig->interface[0]->cur_altsetting; diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index c76034672c18..3d9249632ae1 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig @@ -146,18 +146,6 @@ config USB_STORAGE_KARMA on the resulting scsi device node returns the Karma to normal operation. -config USB_STORAGE_SIERRA - bool "Sierra Wireless TRU-Install Feature Support" - depends on USB_STORAGE - help - Say Y here to include additional code to support Sierra Wireless - products with the TRU-Install feature (e.g., AC597E, AC881U). - - This code switches the Sierra Wireless device from being in - Mass Storage mode to Modem mode. It also has the ability to - support host software upgrades should full Linux support be added - to TRU-Install. 
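Not part of the patch — the serial ID-table hunks above (cp2101, ftdi_sio, option, sierra) all have the same shape; this is a hedged sketch of a minimal table, reusing two IDs quoted from the cp2101 hunk. sketch_ids is an invented name, and a real driver also wires the table into its struct usb_driver.

#include <linux/module.h>
#include <linux/usb.h>

static struct usb_device_id sketch_ids[] = {
        { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, sketch_ids);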
- config USB_STORAGE_CYPRESS_ATACB bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" depends on USB_STORAGE diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile index bc3415b475c9..7f8beb5366ae 100644 --- a/drivers/usb/storage/Makefile +++ b/drivers/usb/storage/Makefile @@ -21,11 +21,10 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o -usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ - initializers.o $(usb-storage-obj-y) + initializers.o sierra_ms.o $(usb-storage-obj-y) ifneq ($(CONFIG_USB_LIBUSUAL),) obj-$(CONFIG_USB) += libusual.o diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ba412e68d474..cd155475cb6e 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -160,6 +160,13 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Filip Joelsson <filip@blueturtle.nu> */ +UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600, + "Nokia", + "Nokia 3110c", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + /* Reported by Mario Rettig <mariorettig@web.de> */ UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, "Nokia", @@ -232,6 +239,20 @@ UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Richard Nauber <RichardNauber@web.de> */ +UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601, + "Nokia", + "6300", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + +/* Patch for Nokia 5310 capacity */ +UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591, + "Nokia", + "5310", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, "SMSC", @@ -987,6 +1008,13 @@ UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Adrian Pilchowiec <adi1981@epf.pl> */ +UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000, + "RockChip", + "MP3", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64), + /* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com> * This USB MP3/AVI player device fails and disconnects if more than 128 * sectors (64kB) are read/written in a single command, and may be present @@ -1576,7 +1604,6 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, NULL, 0), -#ifdef CONFIG_USB_STORAGE_SIERRA /* Reported by Kevin Lloyd <linux@sierrawireless.com> * Entry is needed for the initializer function override, * which instructs the device to load as a modem @@ -1587,7 +1614,6 @@ UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, "USB MMC Storage", US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, 0), -#endif /* Reported by Jaco Kroon <jaco@kroon.co.za> * The usb-storage module found on the Digitech GNX4 (and supposedly other diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 73679aa506de..27016fd2cad1 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -102,9 +102,7 @@ #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB #include "cypress_atacb.h" #endif -#ifdef CONFIG_USB_STORAGE_SIERRA #include "sierra_ms.h" 
-#endif /* Some informational data */ MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index c6299e8a041d..9cbff84b787d 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -2400,11 +2400,15 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) if (!fbcon_is_inactive(vc, info)) { if (ops->blank_state != blank) { + int ret = 1; + ops->blank_state = blank; fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); ops->cursor_flash = (!blank); - if (fb_blank(info, blank)) + if (info->fbops->fb_blank) + ret = info->fbops->fb_blank(blank, info); + if (ret) fbcon_generic_blank(vc, info, blank); } diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h index a6e38e9ea73f..89a346880ec0 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/console/fbcon.h @@ -110,7 +110,7 @@ static inline int mono_col(const struct fb_info *info) __u32 max_len; max_len = max(info->var.green.length, info->var.red.length); max_len = max(info->var.blue.length, max_len); - return ~(0xfff << (max_len & 0xff)); + return (~(0xfff << max_len)) & 0xff; } static inline int attr_col_ec(int shift, struct vc_data *vc, diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c index 614a5c7017b6..6799a6de66fe 100644 --- a/drivers/watchdog/geodewdt.c +++ b/drivers/watchdog/geodewdt.c @@ -130,8 +130,8 @@ static ssize_t geodewdt_write(struct file *file, const char __user *data, return len; } -static int geodewdt_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long geodewdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; @@ -198,7 +198,7 @@ static const struct file_operations geodewdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = geodewdt_write, - .ioctl = geodewdt_ioctl, + .unlocked_ioctl = geodewdt_ioctl, .open = geodewdt_open, .release = geodewdt_release, }; diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c index b82405cfb4cd..89fcefcc8510 100644 --- a/drivers/watchdog/ibmasr.c +++ b/drivers/watchdog/ibmasr.c @@ -85,7 +85,6 @@ static void __asr_toggle(void) outb(reg & ~asr_toggle_mask, asr_write_addr); reg = inb(asr_read_addr); - spin_unlock(&asr_lock); } static void asr_toggle(void) diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 0ed84162437b..6d9f3d4a9987 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -173,8 +173,8 @@ static const struct watchdog_info ident = { .identity = "PNX4008 Watchdog", }; -static long pnx4008_wdt_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { int ret = -ENOTTY; int time; diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index 6756bcb009ed..c9c73b69c5e5 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c @@ -182,8 +182,8 @@ static ssize_t rc32434_wdt_write(struct file *file, const char *data, return 0; } -static int rc32434_wdt_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { void __user *argp = (void __user *)arg; int new_timeout; @@ -242,7 +242,7 @@ static struct file_operations rc32434_wdt_fops = { .owner = 
THIS_MODULE, .llseek = no_llseek, .write = rc32434_wdt_write, - .ioctl = rc32434_wdt_ioctl, + .unlocked_ioctl = rc32434_wdt_ioctl, .open = rc32434_wdt_open, .release = rc32434_wdt_release, }; diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 9108efa73e7d..bf92802f2bbe 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -144,8 +144,8 @@ static int rdc321x_wdt_release(struct inode *inode, struct file *file) return 0; } -static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { void __user *argp = (void __user *)arg; unsigned int value; @@ -204,7 +204,7 @@ static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf, static const struct file_operations rdc321x_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .ioctl = rdc321x_wdt_ioctl, + .unlocked_ioctl = rdc321x_wdt_ioctl, .open = rdc321x_wdt_open, .write = rdc321x_wdt_write, .release = rdc321x_wdt_release, diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index db362c34958b..191ea6302107 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c @@ -115,8 +115,8 @@ static int watchdog_release(struct inode *inode, struct file *file) return 0; } -static ssize_t watchdog_write(struct file *file, const char *data, - size_t len, loff_t *ppos) +static ssize_t watchdog_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) { /* * Refresh the timer. @@ -133,21 +133,22 @@ static const struct watchdog_info ident = { }; static long watchdog_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) + unsigned long arg) { unsigned int new_margin; + int __user *int_arg = (int __user *)arg; int ret = -ENOTTY; switch (cmd) { case WDIOC_GETSUPPORT: ret = 0; - if (copy_to_user((void *)arg, &ident, sizeof(ident))) + if (copy_to_user((void __user *)arg, &ident, sizeof(ident))) ret = -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: - ret = put_user(0, (int *)arg); + ret = put_user(0, int_arg); break; case WDIOC_KEEPALIVE: @@ -156,7 +157,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, break; case WDIOC_SETTIMEOUT: - ret = get_user(new_margin, (int *)arg); + ret = get_user(new_margin, int_arg); if (ret) break; @@ -171,7 +172,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, watchdog_ping(); /* Fall */ case WDIOC_GETTIMEOUT: - ret = put_user(soft_margin, (int *)arg); + ret = put_user(soft_margin, int_arg); break; } return ret; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index c95295c65045..e83aa5ebe861 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -626,8 +626,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, return NULL; error: - if (fid) - p9_client_clunk(fid); + p9_client_clunk(fid); return ERR_PTR(result); } diff --git a/fs/dcache.c b/fs/dcache.c index 80e93956aced..e7a1a99b7464 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1395,6 +1395,10 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) if (dentry->d_parent != parent) goto next; + /* non-existing due to RCU? */ + if (d_unhashed(dentry)) + goto next; + /* * It is safe to compare names since d_move() cannot * change the qstr (protected by d_lock). 
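Taken together, the fs/dcache.c hunk above and the hunk that follows reorder the checks inside __d_lookup(): a dentry that has been unhashed (deleted, but still reachable by concurrent RCU walkers of the hash chain) is now skipped before the name comparison, which in turn lets the reference-count increment at the bottom of the loop become unconditional. Below is a minimal sketch of the resulting loop shape, not part of the patch; it assumes the usual __d_lookup() prologue (hash = name->hash, head = d_hash(parent, hash)), and d_name_matches() is a hypothetical stand-in for the d_op->d_compare()/memcmp() comparison elided here.

	/* sketch only -- illustrates the combined effect of the two hunks */
	struct dentry *dentry, *found = NULL;
	struct hlist_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		/* non-existing due to RCU? skip before comparing names */
		if (d_unhashed(dentry))
			goto next;
		/* safe: d_move() cannot change the qstr while d_lock is held */
		if (!d_name_matches(parent, dentry, name))
			goto next;
		atomic_inc(&dentry->d_count);	/* no longer conditional */
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();
	return found;
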
@@ -1410,10 +1414,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) goto next; } - if (!d_unhashed(dentry)) { - atomic_inc(&dentry->d_count); - found = dentry; - } + atomic_inc(&dentry->d_count); + found = dentry; spin_unlock(&dentry->d_lock); break; next: diff --git a/fs/exec.c b/fs/exec.c index 32993beecbe9..cecee501ce78 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -752,11 +752,11 @@ static int exec_mmap(struct mm_struct *mm) tsk->active_mm = mm; activate_mm(active_mm, mm); task_unlock(tsk); - mm_update_next_owner(old_mm); arch_pick_mmap_layout(mm); if (old_mm) { up_read(&old_mm->mmap_sem); BUG_ON(active_mm != old_mm); + mm_update_next_owner(old_mm); mmput(old_mm); return 0; } diff --git a/fs/inotify_user.c b/fs/inotify_user.c index 60249429a253..d85c7d931cdf 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -323,7 +323,7 @@ out: } /* - * remove_kevent - cleans up and ultimately frees the given kevent + * remove_kevent - cleans up the given kevent * * Caller must hold dev->ev_mutex. */ @@ -334,7 +334,13 @@ static void remove_kevent(struct inotify_device *dev, dev->event_count--; dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; +} +/* + * free_kevent - frees the given kevent. + */ +static void free_kevent(struct inotify_kernel_event *kevent) +{ kfree(kevent->name); kmem_cache_free(event_cachep, kevent); } @@ -350,6 +356,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) struct inotify_kernel_event *kevent; kevent = inotify_dev_get_event(dev); remove_kevent(dev, kevent); + free_kevent(kevent); } } @@ -433,17 +440,15 @@ static ssize_t inotify_read(struct file *file, char __user *buf, dev = file->private_data; while (1) { - int events; prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); mutex_lock(&dev->ev_mutex); - events = !list_empty(&dev->events); - mutex_unlock(&dev->ev_mutex); - if (events) { + if (!list_empty(&dev->events)) { ret = 0; break; } + mutex_unlock(&dev->ev_mutex); if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; @@ -462,7 +467,6 @@ static ssize_t inotify_read(struct file *file, char __user *buf, if (ret) return ret; - mutex_lock(&dev->ev_mutex); while (1) { struct inotify_kernel_event *kevent; @@ -481,6 +485,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf, } break; } + remove_kevent(dev, kevent); + + /* + * Must perform the copy_to_user outside the mutex in order + * to avoid a lock order reversal with mmap_sem. 
+ */ + mutex_unlock(&dev->ev_mutex); if (copy_to_user(buf, &kevent->event, event_size)) { ret = -EFAULT; @@ -498,7 +509,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, count -= kevent->event.len; } - remove_kevent(dev, kevent); + free_kevent(kevent); + + mutex_lock(&dev->ev_mutex); } mutex_unlock(&dev->ev_mutex); diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 52312ec93ff4..5145cb9125af 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -58,7 +58,7 @@ const struct inode_operations ramfs_file_inode_operations = { * size 0 on the assumption that it's going to be used for an mmap of shared * memory */ -static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) +int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) { struct pagevec lru_pvec; unsigned long npages, xpages, loop, limit; diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index b9cb77473758..d7f7645779f2 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -538,7 +538,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n); for (i = 0; i < n; i++) printk(KERN_DEBUG "\t ino %llu\n", - le64_to_cpu(orph->inos[i])); + (unsigned long long)le64_to_cpu(orph->inos[i])); break; } default: diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 2b267c9a1806..526c01ec8003 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -426,7 +426,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) while (1) { dbg_gen("feed '%s', ino %llu, new f_pos %#x", - dent->name, le64_to_cpu(dent->inum), + dent->name, (unsigned long long)le64_to_cpu(dent->inum), key_hash_flash(c, &dent->key)); ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum); diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c index e045c8b55423..47814cde2407 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c @@ -507,7 +507,6 @@ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *free, rsvd_idx_lebs = 0; lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c->lst.taken_empty_lebs; - ubifs_assert(lebs + c->lst.idx_lebs >= c->min_idx_lebs); if (rsvd_idx_lebs < lebs) /* * OK to allocate an empty LEB, but we still don't want to go diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index 13f1019c859f..02aba36fe3d4 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c @@ -334,15 +334,15 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) err = move_nodes(c, sleb); if (err) - goto out; + goto out_inc_seq; err = gc_sync_wbufs(c); if (err) - goto out; + goto out_inc_seq; err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0); if (err) - goto out; + goto out_inc_seq; /* Allow for races with TNC */ c->gced_lnum = lnum; @@ -369,6 +369,14 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) out: ubifs_scan_destroy(sleb); return err; + +out_inc_seq: + /* We may have moved at least some nodes so allow for races with TNC */ + c->gced_lnum = lnum; + smp_wmb(); + c->gc_seq += 1; + smp_wmb(); + goto out; } /** diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 7562464ac83f..3f4902060c7a 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1024,14 +1024,13 @@ static int mount_ubifs(struct ubifs_info *c) goto out_dereg; } + sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); if (!mounted_read_only) { err = alloc_wbufs(c); if (err) goto out_cbuf; /* Create background thread */ - sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, - 
c->vi.vol_id); c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name); if (!c->bgt) c->bgt = ERR_PTR(-EINVAL); diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 7da209ab9378..7634c5970887 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -1476,7 +1476,7 @@ again: } err = fallible_read_node(c, key, &zbr, node); - if (maybe_leb_gced(c, zbr.lnum, gc_seq1)) { + if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) { /* * The node may have been GC'ed out from under us so try again * while keeping the TNC mutex locked. diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 00e80df9dd9d..dbd9cef852ec 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -4118,7 +4118,7 @@ xfs_iext_indirect_to_direct( ASSERT(nextents <= XFS_LINEAR_EXTS); size = nextents * sizeof(xfs_bmbt_rec_t); - xfs_iext_irec_compact_full(ifp); + xfs_iext_irec_compact_pages(ifp); ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); ep = ifp->if_u1.if_ext_irec->er_extbuf; @@ -4449,8 +4449,7 @@ xfs_iext_irec_remove( * compaction policy is as follows: * * Full Compaction: Extents fit into a single page (or inline buffer) - * Full Compaction: Extents occupy less than 10% of allocated space - * Partial Compaction: Extents occupy > 10% and < 50% of allocated space + * Partial Compaction: Extents occupy less than 50% of allocated space * No Compaction: Extents occupy at least 50% of allocated space */ void @@ -4471,8 +4470,6 @@ xfs_iext_irec_compact( xfs_iext_direct_to_inline(ifp, nextents); } else if (nextents <= XFS_LINEAR_EXTS) { xfs_iext_indirect_to_direct(ifp); - } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) { - xfs_iext_irec_compact_full(ifp); } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { xfs_iext_irec_compact_pages(ifp); } @@ -4496,7 +4493,7 @@ xfs_iext_irec_compact_pages( erp_next = erp + 1; if (erp_next->er_extcount <= (XFS_LINEAR_EXTS - erp->er_extcount)) { - memmove(&erp->er_extbuf[erp->er_extcount], + memcpy(&erp->er_extbuf[erp->er_extcount], erp_next->er_extbuf, erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); erp->er_extcount += erp_next->er_extcount; @@ -4516,91 +4513,6 @@ xfs_iext_irec_compact_pages( } /* - * Fully compact the extent records managed by the indirection array. - */ -void -xfs_iext_irec_compact_full( - xfs_ifork_t *ifp) /* inode fork pointer */ -{ - xfs_bmbt_rec_host_t *ep, *ep_next; /* extent record pointers */ - xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */ - int erp_idx = 0; /* extent irec index */ - int ext_avail; /* empty entries in ex list */ - int ext_diff; /* number of exts to add */ - int nlists; /* number of irec's (ex lists) */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - erp = ifp->if_u1.if_ext_irec; - ep = &erp->er_extbuf[erp->er_extcount]; - erp_next = erp + 1; - ep_next = erp_next->er_extbuf; - - while (erp_idx < nlists - 1) { - /* - * Check how many extent records are available in this irec. - * If there is none skip the whole exercise. - */ - ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; - if (ext_avail) { - - /* - * Copy over as many as possible extent records into - * the previous page. - */ - ext_diff = MIN(ext_avail, erp_next->er_extcount); - memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); - erp->er_extcount += ext_diff; - erp_next->er_extcount -= ext_diff; - - /* - * If the next irec is empty now we can simply - * remove it. - */ - if (erp_next->er_extcount == 0) { - /* - * Free page before removing extent record - * so er_extoffs don't get modified in - * xfs_iext_irec_remove. 
- */ - kmem_free(erp_next->er_extbuf); - erp_next->er_extbuf = NULL; - xfs_iext_irec_remove(ifp, erp_idx + 1); - erp = &ifp->if_u1.if_ext_irec[erp_idx]; - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - - /* - * If the next irec is not empty move up the content - * that has not been copied to the previous page to - * the beggining of this one. - */ - } else { - memmove(erp_next->er_extbuf, &ep_next[ext_diff], - erp_next->er_extcount * - sizeof(xfs_bmbt_rec_t)); - ep_next = erp_next->er_extbuf; - memset(&ep_next[erp_next->er_extcount], 0, - (XFS_LINEAR_EXTS - - erp_next->er_extcount) * - sizeof(xfs_bmbt_rec_t)); - } - } - - if (erp->er_extcount == XFS_LINEAR_EXTS) { - erp_idx++; - if (erp_idx < nlists) - erp = &ifp->if_u1.if_ext_irec[erp_idx]; - else - break; - } - ep = &erp->er_extbuf[erp->er_extcount]; - erp_next = erp + 1; - ep_next = erp_next->er_extbuf; - } -} - -/* * This is called to update the er_extoff field in the indirection * array when extents have been added or removed from one of the * extent lists. erp_idx contains the irec index to begin updating diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h new file mode 100644 index 000000000000..fa4328f9124f --- /dev/null +++ b/include/asm-mips/cevt-r4k.h @@ -0,0 +1,46 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Kevin D. Kissell + */ + +/* + * Definitions used for common event timer implementation + * for MIPS 4K-type processors and their MIPS MT variants. + * Avoids unsightly extern declarations in C files. + */ +#ifndef __ASM_CEVT_R4K_H +#define __ASM_CEVT_R4K_H + +DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); + +void mips_event_handler(struct clock_event_device *dev); +int c0_compare_int_usable(void); +void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *); +irqreturn_t c0_compare_interrupt(int, void *); + +extern struct irqaction c0_compare_irqaction; +extern int cp0_timer_irq_installed; + +/* + * Possibly handle a performance counter interrupt. + * Return true if the timer interrupt should not be checked + */ + +static inline int handle_perf_irq(int r2) +{ + /* + * The performance counter overflow interrupt may be shared with the + * timer interrupt (cp0_perfcount_irq < 0). If it is and a + * performance counter has overflowed (perf_irq() == IRQ_HANDLED) + * and we can't reliably determine if a counter interrupt has also + * happened (!r2) then don't check for a timer interrupt. 
+ */ + return (cp0_perfcount_irq < 0) && + perf_irq() == IRQ_HANDLED && + !r2; +} + +#endif /* __ASM_CEVT_R4K_H */ diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h index 881e8866501d..701ec0ba8fa9 100644 --- a/include/asm-mips/irqflags.h +++ b/include/asm-mips/irqflags.h @@ -38,8 +38,17 @@ __asm__( " .set pop \n" " .endm"); +extern void smtc_ipi_replay(void); + static inline void raw_local_irq_enable(void) { +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of call overhead on each local_irq_enable() + */ + smtc_ipi_replay(); +#endif __asm__ __volatile__( "raw_local_irq_enable" : /* no outputs */ @@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void) : "memory"); } + /* * For cli() we have to insert nops to make sure that the new value * has actually arrived in the status register before the end of this @@ -185,15 +195,14 @@ __asm__( " .set pop \n" " .endm \n"); -extern void smtc_ipi_replay(void); static inline void raw_local_irq_restore(unsigned long flags) { unsigned long __tmp1; -#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY +#ifdef CONFIG_MIPS_MT_SMTC /* - * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred + * SMTC kernel needs to do a software replay of queued * IPIs, at the cost of branch and call overhead on each * local_irq_restore() */ @@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags) : "memory"); } +static inline void __raw_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + __asm__ __volatile__( + "raw_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); +} + static inline int raw_irqs_disabled_flags(unsigned long flags) { #ifdef CONFIG_MIPS_MT_SMTC diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h index a46f8e258e6b..979866000da4 100644 --- a/include/asm-mips/mipsregs.h +++ b/include/asm-mips/mipsregs.h @@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \ { \ unsigned int res; \ unsigned int omt; \ - unsigned int flags; \ + unsigned long flags; \ \ local_irq_save(flags); \ omt = __dmt(); \ @@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \ { \ unsigned int res; \ unsigned int omt; \ - unsigned int flags; \ + unsigned long flags; \ \ local_irq_save(flags); \ omt = __dmt(); \ @@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \ { \ unsigned int res; \ unsigned int omt; \ - unsigned int flags; \ + unsigned long flags; \ \ local_irq_save(flags); \ \ diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h index 4396e9ffd418..55813d6150c7 100644 --- a/include/asm-mips/pgtable-32.h +++ b/include/asm-mips/pgtable-32.h @@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define PMD_ORDER 1 #define PTE_ORDER 0 -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h index 3639b28f80db..ea60bf08dcb0 100644 --- a/include/asm-mips/smtc.h +++ b/include/asm-mips/smtc.h @@ -6,6 +6,7 @@ */ #include <asm/mips_mt.h> +#include <asm/smtc_ipi.h> /* * System-wide SMTC status information @@ -38,14 +39,15 @@ struct mm_struct; struct task_struct; void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); - +void self_ipi(struct smtc_ipi *); void 
smtc_flush_tlb_asid(unsigned long asid); -extern int mipsmt_build_cpu_map(int startslot); -extern void mipsmt_prepare_cpus(void); +extern int smtc_build_cpu_map(int startslot); +extern void smtc_prepare_cpus(int cpus); extern void smtc_smp_finish(void); extern void smtc_boot_secondary(int cpu, struct task_struct *t); extern void smtc_cpus_done(void); + /* * Sharing the TLB between multiple VPEs means that the * "random" index selection function is not allowed to diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index 051e1af0bb95..4c37c4e5f72e 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h @@ -297,14 +297,31 @@ #ifdef CONFIG_MIPS_MT_SMTC .set mips32r2 /* - * This may not really be necessary if ints are already - * inhibited here. + * We need to make sure the read-modify-write + * of Status below isn't perturbed by an interrupt + * or cross-TC access, so we need to do at least a DMT, + * protected by an interrupt-inhibit. But setting IXMT + * also creates a few-cycle window where an IPI could + * be queued and not be detected before potentially + * returning to a WAIT or user-mode loop. It must be + * replayed. + * + * We're in the middle of a context switch, and + * we can't dispatch it directly without trashing + * some registers, so we'll try to detect this unlikely + * case and program a software interrupt in the VPE, + * as would be done for a cross-VPE IPI. To accomodate + * the handling of that case, we're doing a DVPE instead + * of just a DMT here to protect against other threads. + * This is a lot of cruft to cover a tiny window. + * If you can find a better design, implement it! + * */ mfc0 v0, CP0_TCSTATUS ori v0, TCSTATUS_IXMT mtc0 v0, CP0_TCSTATUS _ehb - DMT 5 # dmt a1 + DVPE 5 # dvpe a1 jal mips_ihb #endif /* CONFIG_MIPS_MT_SMTC */ mfc0 a0, CP0_STATUS @@ -325,17 +342,50 @@ */ LONG_L v1, PT_TCSTATUS(sp) _ehb - mfc0 v0, CP0_TCSTATUS + mfc0 a0, CP0_TCSTATUS andi v1, TCSTATUS_IXMT - /* We know that TCStatua.IXMT should be set from above */ - xori v0, v0, TCSTATUS_IXMT - or v0, v0, v1 - mtc0 v0, CP0_TCSTATUS - _ehb - andi a1, a1, VPECONTROL_TE + bnez v1, 0f + +/* + * We'd like to detect any IPIs queued in the tiny window + * above and request an software interrupt to service them + * when we ERET. + * + * Computing the offset into the IPIQ array of the executing + * TC's IPI queue in-line would be tedious. We use part of + * the TCContext register to hold 16 bits of offset that we + * can add in-line to find the queue head. + */ + mfc0 v0, CP0_TCCONTEXT + la a2, IPIQ + srl v0, v0, 16 + addu a2, a2, v0 + LONG_L v0, 0(a2) + beqz v0, 0f +/* + * If we have a queue, provoke dispatch within the VPE by setting C_SW1 + */ + mfc0 v0, CP0_CAUSE + ori v0, v0, C_SW1 + mtc0 v0, CP0_CAUSE +0: + /* + * This test should really never branch but + * let's be prudent here. Having atomized + * the shared register modifications, we can + * now EVPE, and must do so before interrupts + * are potentially re-enabled. 
+ */ + andi a1, a1, MVPCONTROL_EVP beqz a1, 1f - emt + evpe 1: + /* We know that TCStatua.IXMT should be set from above */ + xori a0, a0, TCSTATUS_IXMT + or a0, a0, v1 + mtc0 a0, CP0_TCSTATUS + _ehb + .set mips0 #endif /* CONFIG_MIPS_MT_SMTC */ LONG_L v1, PT_EPC(sp) diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 635d764dc13e..35d1743b57ac 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h @@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A) return 1; + else if (boot_cpu_has(X86_FEATURE_AMDC1E)) + return 1; else return max_cstate; } diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 9489283a4bcf..cfcfb0a806ba 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -81,6 +81,7 @@ #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h index d240e5b30a45..cbb649123612 100644 --- a/include/asm-x86/idle.h +++ b/include/asm-x86/idle.h @@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n); void enter_idle(void); void exit_idle(void); +void c1e_remove_cpu(int cpu); + #endif diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h index 484c47554f3b..94d63db10365 100644 --- a/include/asm-x86/kgdb.h +++ b/include/asm-x86/kgdb.h @@ -39,12 +39,13 @@ enum regnames { GDB_FS, /* 14 */ GDB_GS, /* 15 */ }; +#define NUMREGBYTES ((GDB_GS+1)*4) #else /* ! CONFIG_X86_32 */ -enum regnames { +enum regnames64 { GDB_AX, /* 0 */ - GDB_DX, /* 1 */ + GDB_BX, /* 1 */ GDB_CX, /* 2 */ - GDB_BX, /* 3 */ + GDB_DX, /* 3 */ GDB_SI, /* 4 */ GDB_DI, /* 5 */ GDB_BP, /* 6 */ @@ -58,18 +59,15 @@ enum regnames { GDB_R14, /* 14 */ GDB_R15, /* 15 */ GDB_PC, /* 16 */ - GDB_PS, /* 17 */ }; -#endif /* CONFIG_X86_32 */ -/* - * Number of bytes of registers: - */ -#ifdef CONFIG_X86_32 -# define NUMREGBYTES 64 -#else -# define NUMREGBYTES ((GDB_PS+1)*8) -#endif +enum regnames32 { + GDB_PS = 34, + GDB_CS, + GDB_SS, +}; +#define NUMREGBYTES ((GDB_SS+1)*4) +#endif /* CONFIG_X86_32 */ static inline void arch_kgdb_breakpoint(void) { diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index 515d4dce96b5..45806d60bcbe 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h @@ -7,6 +7,7 @@ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/prefetch.h> +#include <linux/lockdep.h> #include <asm/page.h> /* diff --git a/arch/arm/include/asm/cnt32_to_63.h b/include/linux/cnt32_to_63.h index 480c873fa746..8c0f9505b48c 100644 --- a/arch/arm/include/asm/cnt32_to_63.h +++ b/include/linux/cnt32_to_63.h @@ -1,5 +1,5 @@ /* - * include/asm/cnt32_to_63.h -- extend a 32-bit counter to 63 bits + * Extend a 32-bit counter to 63 bits * * Author: Nicolas Pitre * Created: December 3, 2006 @@ -10,15 +10,30 @@ * as published by the Free Software Foundation. 
*/ -#ifndef __INCLUDE_CNT32_TO_63_H__ -#define __INCLUDE_CNT32_TO_63_H__ +#ifndef __LINUX_CNT32_TO_63_H__ +#define __LINUX_CNT32_TO_63_H__ #include <linux/compiler.h> -#include <asm/types.h> +#include <linux/types.h> #include <asm/byteorder.h> -/* - * Prototype: u64 cnt32_to_63(u32 cnt) +/* this is used only to give gcc a clue about good code generation */ +union cnt32_to_63 { + struct { +#if defined(__LITTLE_ENDIAN) + u32 lo, hi; +#elif defined(__BIG_ENDIAN) + u32 hi, lo; +#endif + }; + u64 val; +}; + + +/** + * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter + * @cnt_lo: The low part of the counter + * * Many hardware clock counters are only 32 bits wide and therefore have * a relatively short period making wrap-arounds rather frequent. This * is a problem when implementing sched_clock() for example, where a 64-bit @@ -51,26 +66,13 @@ * clear-bit instruction. Otherwise caller must remember to clear the top * bit explicitly. */ - -/* this is used only to give gcc a clue about good code generation */ -typedef union { - struct { -#if defined(__LITTLE_ENDIAN) - u32 lo, hi; -#elif defined(__BIG_ENDIAN) - u32 hi, lo; -#endif - }; - u64 val; -} cnt32_to_63_t; - #define cnt32_to_63(cnt_lo) \ ({ \ - static volatile u32 __m_cnt_hi = 0; \ - cnt32_to_63_t __x; \ + static volatile u32 __m_cnt_hi; \ + union cnt32_to_63 __x; \ __x.hi = __m_cnt_hi; \ __x.lo = (cnt_lo); \ - if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ + if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ __x.val; \ }) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6d93dce61cbb..2f245fe63bda 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -47,14 +47,22 @@ enum hrtimer_restart { * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and * does not restart the timer - * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context - * Special mode for tick emultation + * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context + * Special mode for tick emulation and + * scheduler timer. Such timers are per + * cpu and not allowed to be migrated on + * cpu unplug. + * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context + * with timer->base lock unlocked + * used for timers which call wakeup to + * avoid lock order problems with rq->lock */ enum hrtimer_cb_mode { HRTIMER_CB_SOFTIRQ, HRTIMER_CB_IRQSAFE, HRTIMER_CB_IRQSAFE_NO_RESTART, - HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, + HRTIMER_CB_IRQSAFE_PERCPU, + HRTIMER_CB_IRQSAFE_UNLOCKED, }; /* @@ -67,9 +75,10 @@ enum hrtimer_cb_mode { * 0x02 callback function running * 0x04 callback pending (high resolution mode) * - * Special case: + * Special cases: * 0x03 callback function running and enqueued * (was requeued on another CPU) + * 0x09 timer was migrated on CPU hotunplug * The "callback function running and enqueued" status is only possible on * SMP. It happens for example when a posix timer expired and the callback * queued a signal. 
Between dropping the lock which protects the posix timer @@ -87,6 +96,7 @@ enum hrtimer_cb_mode { #define HRTIMER_STATE_ENQUEUED 0x01 #define HRTIMER_STATE_CALLBACK 0x02 #define HRTIMER_STATE_PENDING 0x04 +#define HRTIMER_STATE_MIGRATE 0x08 /** * struct hrtimer - the basic hrtimer structure diff --git a/include/linux/pci.h b/include/linux/pci.h index c0e14008a3c2..98dc6243a706 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -534,7 +534,7 @@ extern void pci_sort_breadthfirst(void); #ifdef CONFIG_PCI_LEGACY struct pci_dev __deprecated *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from); + struct pci_dev *from); struct pci_dev __deprecated *pci_find_slot(unsigned int bus, unsigned int devfn); #endif /* CONFIG_PCI_LEGACY */ @@ -550,7 +550,7 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from); + struct pci_dev *from); struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); @@ -816,7 +816,7 @@ _PCI_NOP_ALL(write,) static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from) + struct pci_dev *from) { return NULL; } @@ -838,7 +838,7 @@ static inline struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from) + struct pci_dev *from) { return NULL; } diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index b160fb18e8d6..37aaf2b39863 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt); #ifndef CONFIG_MMU +extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, diff --git a/include/linux/smb.h b/include/linux/smb.h index caa43b2370cb..82fefddc5987 100644 --- a/include/linux/smb.h +++ b/include/linux/smb.h @@ -11,7 +11,9 @@ #include <linux/types.h> #include <linux/magic.h> +#ifdef __KERNEL__ #include <linux/time.h> +#endif enum smb_protocol { SMB_PROTOCOL_NONE, diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d78..b106fd8e0d5c 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -1,6 +1,8 @@ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H +struct task_struct; + #ifdef CONFIG_STACKTRACE struct stack_trace { unsigned int nr_entries, max_entries; diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index b3d3e27c6299..c3626c0ba9d3 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -596,4 +596,5 @@ int p9_idpool_check(int id, struct p9_idpool *p); int p9_error_init(void); int p9_errstr2errno(char *, int); int p9_trans_fd_init(void); +void p9_trans_fd_exit(void); #endif /* NET_9P_H */ diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index 0db3a4038dc0..3ca737120a90 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -26,6 +26,8 @@ #ifndef NET_9P_TRANSPORT_H #define NET_9P_TRANSPORT_H +#include <linux/module.h> + /** * enum p9_trans_status - 
different states of underlying transports * @Connected: transport is connected and healthy @@ -91,9 +93,12 @@ struct p9_trans_module { int maxsize; /* max message size of transport */ int def; /* this transport should be default */ struct p9_trans * (*create)(const char *, char *, int, unsigned char); + struct module *owner; }; void v9fs_register_trans(struct p9_trans_module *m); -struct p9_trans_module *v9fs_match_trans(const substring_t *name); -struct p9_trans_module *v9fs_default_trans(void); +void v9fs_unregister_trans(struct p9_trans_module *m); +struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name); +struct p9_trans_module *v9fs_get_default_trans(void); +void v9fs_put_trans(struct p9_trans_module *m); #endif /* NET_9P_TRANSPORT_H */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 24811732bdb2..029a54a02396 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -227,6 +227,9 @@ struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, const size_t ); +struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *, + const struct sctp_chunk *, + struct sctp_paramhdr *); struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, const struct sctp_transport *, const void *payload, diff --git a/init/main.c b/init/main.c index f6f7042331dc..3820323c4c84 100644 --- a/init/main.c +++ b/init/main.c @@ -708,7 +708,7 @@ int do_one_initcall(initcall_t fn) int result; if (initcall_debug) { - print_fn_descriptor_symbol("calling %s\n", fn); + printk("calling %pF\n", fn); t0 = ktime_get(); } @@ -718,8 +718,8 @@ int do_one_initcall(initcall_t fn) t1 = ktime_get(); delta = ktime_sub(t1, t0); - print_fn_descriptor_symbol("initcall %s", fn); - printk(" returned %d after %Ld msecs\n", result, + printk("initcall %pF returned %d after %Ld msecs\n", + fn, result, (unsigned long long) delta.tv64 >> 20); } @@ -737,8 +737,7 @@ int do_one_initcall(initcall_t fn) local_irq_enable(); } if (msgbuf[0]) { - print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn); - printk(" returned with %s\n", msgbuf); + printk("initcall %pF returned with %s\n", fn, msgbuf); } return result; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 13932abde159..a0123d75ec9a 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child) */ void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) { - struct cgroup *oldcgrp, *newcgrp; + struct cgroup *oldcgrp, *newcgrp = NULL; if (need_mm_owner_callback) { int i; for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys *ss = subsys[i]; oldcgrp = task_cgroup(old, ss->subsys_id); - newcgrp = task_cgroup(new, ss->subsys_id); + if (new) + newcgrp = task_cgroup(new, ss->subsys_id); if (oldcgrp == newcgrp) continue; if (ss->mm_owner_changed) diff --git a/kernel/exit.c b/kernel/exit.c index 16395644a98f..85a83c831856 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) * If there are other users of the mm and the owner (us) is exiting * we need to find a new owner to take on the responsibility. 
*/ - if (!mm) - return 0; if (atomic_read(&mm->mm_users) <= 1) return 0; if (mm->owner != p) @@ -627,6 +625,16 @@ retry: } while_each_thread(g, c); read_unlock(&tasklist_lock); + /* + * We found no owner yet mm_users > 1: this implies that we are + * most likely racing with swapoff (try_to_unuse()) or /proc or + * ptrace or page migration (get_task_mm()). Mark owner as NULL, + * so that subsystems can understand the callback and take action. + */ + down_write(&mm->mmap_sem); + cgroup_mm_owner_callbacks(mm->owner, NULL); + mm->owner = NULL; + up_write(&mm->mmap_sem); return; assign_new_owner: diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b8e4dce80a74..cdec83e722fa 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, */ BUG_ON(timer->function(timer) != HRTIMER_NORESTART); return 1; - case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: + case HRTIMER_CB_IRQSAFE_PERCPU: + case HRTIMER_CB_IRQSAFE_UNLOCKED: /* * This is solely for the sched tick emulation with * dynamic tick support to ensure that we do not * restart the tick right on the edge and end up with * the tick timer in the softirq ! The calling site - * takes care of this. + * takes care of this. Also used for hrtimer sleeper ! */ debug_hrtimer_deactivate(timer); return 1; @@ -1245,7 +1246,8 @@ static void __run_hrtimer(struct hrtimer *timer) timer_stats_account_hrtimer(timer); fn = timer->function; - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) { + if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU || + timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) { /* * Used for scheduler timers, avoid lock inversion with * rq->lock and tasklist_lock. @@ -1452,7 +1454,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) sl->timer.function = hrtimer_wakeup; sl->task = task; #ifdef CONFIG_HIGH_RES_TIMERS - sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; + sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; #endif } @@ -1591,29 +1593,95 @@ static void __cpuinit init_hrtimers_cpu(int cpu) #ifdef CONFIG_HOTPLUG_CPU -static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, - struct hrtimer_clock_base *new_base) +static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base, + struct hrtimer_clock_base *new_base, int dcpu) { struct hrtimer *timer; struct rb_node *node; + int raise = 0; while ((node = rb_first(&old_base->active))) { timer = rb_entry(node, struct hrtimer, node); BUG_ON(hrtimer_callback_running(timer)); debug_hrtimer_deactivate(timer); - __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0); + + /* + * Should not happen. Per CPU timers should be + * canceled _before_ the migration code is called + */ + if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) { + __remove_hrtimer(timer, old_base, + HRTIMER_STATE_INACTIVE, 0); + WARN(1, "hrtimer (%p %p)active but cpu %d dead\n", + timer, timer->function, dcpu); + continue; + } + + /* + * Mark it as STATE_MIGRATE not INACTIVE otherwise the + * timer could be seen as !active and just vanish away + * under us on another CPU + */ + __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); timer->base = new_base; /* * Enqueue the timer. Allow reprogramming of the event device */ enqueue_hrtimer(timer, new_base, 1); + +#ifdef CONFIG_HIGH_RES_TIMERS + /* + * Happens with high res enabled when the timer was + * already expired and the callback mode is + * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). 
The + * enqueue code does not move them to the soft irq + * pending list for performance/latency reasons, but + * in the migration state, we need to do that + * otherwise we end up with a stale timer. + */ + if (timer->state == HRTIMER_STATE_MIGRATE) { + timer->state = HRTIMER_STATE_PENDING; + list_add_tail(&timer->cb_entry, + &new_base->cpu_base->cb_pending); + raise = 1; + } +#endif + /* Clear the migration state bit */ + timer->state &= ~HRTIMER_STATE_MIGRATE; + } + return raise; +} + +#ifdef CONFIG_HIGH_RES_TIMERS +static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base, + struct hrtimer_cpu_base *new_base) +{ + struct hrtimer *timer; + int raise = 0; + + while (!list_empty(&old_base->cb_pending)) { + timer = list_entry(old_base->cb_pending.next, + struct hrtimer, cb_entry); + + __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0); + timer->base = &new_base->clock_base[timer->base->index]; + list_add_tail(&timer->cb_entry, &new_base->cb_pending); + raise = 1; } + return raise; +} +#else +static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base, + struct hrtimer_cpu_base *new_base) +{ + return 0; } +#endif static void migrate_hrtimers(int cpu) { struct hrtimer_cpu_base *old_base, *new_base; - int i; + int i, raise = 0; BUG_ON(cpu_online(cpu)); old_base = &per_cpu(hrtimer_bases, cpu); @@ -1626,14 +1694,21 @@ static void migrate_hrtimers(int cpu) spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { - migrate_hrtimer_list(&old_base->clock_base[i], - &new_base->clock_base[i]); + if (migrate_hrtimer_list(&old_base->clock_base[i], + &new_base->clock_base[i], cpu)) + raise = 1; } + if (migrate_hrtimer_pending(old_base, new_base)) + raise = 1; + spin_unlock(&old_base->lock); spin_unlock(&new_base->lock); local_irq_enable(); put_cpu_var(hrtimer_bases); + + if (raise) + hrtimer_raise_softirq(); } #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/kernel/kexec.c b/kernel/kexec.c index 59f3f0df35d4..aef265325cd3 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -753,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image, *old = addr | (*old & ~PAGE_MASK); /* The old page I have found cannot be a - * destination page, so return it. + * destination page, so return it if it's + * gfp_flags honor the ones passed in. 
*/ + if (!(gfp_mask & __GFP_HIGHMEM) && + PageHighMem(old_page)) { + kimage_free_pages(old_page); + continue; + } addr = old_addr; page = old_page; break; diff --git a/kernel/kgdb.c b/kernel/kgdb.c index eaa21fc9ad1d..25d955dbb989 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c @@ -488,7 +488,7 @@ static int write_mem_msg(int binary) if (err) return err; if (CACHE_FLUSH_IS_SAFE) - flush_icache_range(addr, addr + length + 1); + flush_icache_range(addr, addr + length); return 0; } @@ -1462,7 +1462,7 @@ acquirelock: * Get the passive CPU lock which will hold all the non-primary * CPU in a spin state while the debugger is active */ - if (!kgdb_single_step || !kgdb_contthread) { + if (!kgdb_single_step) { for (i = 0; i < NR_CPUS; i++) atomic_set(&passive_cpu_wait[i], 1); } @@ -1475,7 +1475,7 @@ acquirelock: #ifdef CONFIG_SMP /* Signal the other CPUs to enter kgdb_wait() */ - if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup) + if ((!kgdb_single_step) && kgdb_do_roundup) kgdb_roundup_cpus(flags); #endif @@ -1494,7 +1494,7 @@ acquirelock: kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); kgdb_deactivate_sw_breakpoints(); kgdb_single_step = 0; - kgdb_contthread = NULL; + kgdb_contthread = current; exception_level = 0; /* Talk to debugger with gdbserial protocol */ @@ -1508,7 +1508,7 @@ acquirelock: kgdb_info[ks->cpu].task = NULL; atomic_set(&cpu_in_kgdb[ks->cpu], 0); - if (!kgdb_single_step || !kgdb_contthread) { + if (!kgdb_single_step) { for (i = NR_CPUS-1; i >= 0; i--) atomic_set(&passive_cpu_wait[i], 0); /* diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index e36d5798cbff..5131e5471169 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -441,7 +441,7 @@ static struct k_itimer * alloc_posix_timer(void) return tmr; if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { kmem_cache_free(posix_timers_cache, tmr); - tmr = NULL; + return NULL; } memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); return tmr; diff --git a/kernel/sched.c b/kernel/sched.c index 98890807375b..ad1962dc0aa2 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -201,7 +201,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rt_b->rt_period_timer.function = sched_rt_period_timer; - rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; + rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; } static void start_rt_bandwidth(struct rt_bandwidth *rt_b) @@ -1087,7 +1087,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_DONE; } -static void init_hrtick(void) +static __init void init_hrtick(void) { hotcpu_notifier(hotplug_hrtick, 0); } @@ -1119,7 +1119,7 @@ static void init_rq_hrtick(struct rq *rq) hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rq->hrtick_timer.function = hrtick; - rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; + rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; } #else static inline void hrtick_clear(struct rq *rq) diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f1f3eee28113..cb01cd8f919b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -235,7 +235,8 @@ static void tick_do_broadcast_on_off(void *why) case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: if (!cpu_isset(cpu, tick_broadcast_mask)) { cpu_set(cpu, tick_broadcast_mask); - if (td->mode == TICKDEV_MODE_PERIODIC) + if (tick_broadcast_device.mode == + 
TICKDEV_MODE_PERIODIC) clockevents_shutdown(dev); } if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) @@ -245,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why) if (!tick_broadcast_force && cpu_isset(cpu, tick_broadcast_mask)) { cpu_clear(cpu, tick_broadcast_mask); - if (td->mode == TICKDEV_MODE_PERIODIC) + if (tick_broadcast_device.mode == + TICKDEV_MODE_PERIODIC) tick_setup_periodic(dev, 0); } break; @@ -575,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) spin_unlock_irqrestore(&tick_broadcast_lock, flags); } +/* + * Check, whether the broadcast device is in one shot mode + */ +int tick_broadcast_oneshot_active(void) +{ + return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; +} + #endif diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 019315ebf9de..df12434b43ca 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); */ ktime_t tick_next_period; ktime_t tick_period; -int tick_do_timer_cpu __read_mostly = -1; +int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; DEFINE_SPINLOCK(tick_device_lock); /* @@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) if (!tick_device_is_functional(dev)) return; - if (dev->features & CLOCK_EVT_FEAT_PERIODIC) { + if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && + !tick_broadcast_oneshot_active()) { clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); } else { unsigned long seq; @@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td, * If no cpu took the do_timer update, assign it to * this cpu: */ - if (tick_do_timer_cpu == -1) { + if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { tick_do_timer_cpu = cpu; tick_next_period = ktime_get(); tick_period = ktime_set(0, NSEC_PER_SEC / HZ); @@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup) if (*cpup == tick_do_timer_cpu) { int cpu = first_cpu(cpu_online_map); - tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1; + tick_do_timer_cpu = (cpu != NR_CPUS) ? 
cpu : + TICK_DO_TIMER_NONE; } spin_unlock_irqrestore(&tick_device_lock, flags); } diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 6e9db9734aa6..469248782c23 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -1,6 +1,10 @@ /* * tick internal variable and functions used by low/high res code */ + +#define TICK_DO_TIMER_NONE -1 +#define TICK_DO_TIMER_BOOT -2 + DECLARE_PER_CPU(struct tick_device, tick_cpu_device); extern spinlock_t tick_device_lock; extern ktime_t tick_next_period; @@ -31,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason); extern void tick_broadcast_switch_to_oneshot(void); extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); +extern int tick_broadcast_oneshot_active(void); # else /* BROADCAST */ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { @@ -39,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) static inline void tick_broadcast_oneshot_control(unsigned long reason) { } static inline void tick_broadcast_switch_to_oneshot(void) { } static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } +static inline int tick_broadcast_oneshot_active(void) { return 0; } # endif /* !BROADCAST */ #else /* !ONESHOT */ @@ -68,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) { return 0; } +static inline int tick_broadcast_oneshot_active(void) { return 0; } #endif /* !TICK_ONESHOT */ /* diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a87b0468568b..cb02324bdb88 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now) incr * ticks); } do_timer(++ticks); + + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); } write_sequnlock(&xtime_lock); } @@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle) */ if (unlikely(!cpu_online(cpu))) { if (cpu == tick_do_timer_cpu) - tick_do_timer_cpu = -1; + tick_do_timer_cpu = TICK_DO_TIMER_NONE; } if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) @@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle) * invoked. */ if (cpu == tick_do_timer_cpu) - tick_do_timer_cpu = -1; + tick_do_timer_cpu = TICK_DO_TIMER_NONE; ts->idle_sleeps++; @@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev) * this duty, then the jiffies update is still serialized by * xtime_lock. */ - if (unlikely(tick_do_timer_cpu == -1)) + if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) tick_do_timer_cpu = cpu; /* Check, if the jiffies need an update */ @@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) * this duty, then the jiffies update is still serialized by * xtime_lock. 
*/ - if (unlikely(tick_do_timer_cpu == -1)) + if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) tick_do_timer_cpu = cpu; #endif @@ -622,7 +625,7 @@ void tick_setup_sched_timer(void) */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ts->sched_timer.function = tick_sched_timer; - ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; + ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; /* Get the next period (per cpu) */ ts->sched_timer.expires = tick_init_jiffy_update(); diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index bb948e52ce20..db58fb66a135 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -202,7 +202,7 @@ static void start_stack_timer(int cpu) hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = stack_trace_timer_fn; - hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; + hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0f1f7a7374ba..36896f3eb7f5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) { + /* + * mm_update_next_owner() may clear mm->owner to NULL + * if it races with swapoff, page migration, etc. + * So this can be called with p == NULL. + */ + if (unlikely(!p)) + return NULL; + return container_of(task_subsys_state(p, mem_cgroup_subsys_id), struct mem_cgroup, css); } @@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, if (likely(!memcg)) { rcu_read_lock(); mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!mem)) { + rcu_read_unlock(); + kmem_cache_free(page_cgroup_cache, pc); + return 0; + } /* * For every charge from the cgroup, increment reference count */ @@ -801,11 +814,16 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) rcu_read_lock(); mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!mem)) { + rcu_read_unlock(); + return 0; + } css_get(&mem->css); rcu_read_unlock(); do { progress = try_to_free_mem_cgroup_pages(mem, gfp_mask); + progress += res_counter_check_under_limit(&mem->res); } while (!progress && --retry); css_put(&mem->css); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e293c58bea58..27b8681139fd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -268,13 +268,14 @@ void prep_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; + struct page *p = page + 1; set_compound_page_dtor(page, free_compound_page); set_compound_order(page, order); __SetPageHead(page); - for (i = 1; i < nr_pages; i++) { - struct page *p = page + i; - + for (i = 1; i < nr_pages; i++, p++) { + if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) + p = pfn_to_page(page_to_pfn(page) + i); __SetPageTail(p); p->first_page = page; } @@ -284,6 +285,7 @@ static void destroy_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; + struct page *p = page + 1; if (unlikely(compound_order(page) != order)) bad_page(page); @@ -291,8 +293,9 @@ static void destroy_compound_page(struct page *page, unsigned long order) if (unlikely(!PageHead(page))) bad_page(page); __ClearPageHead(page); - for (i = 1; i < nr_pages; i++) { - struct page *p = page + i; + for (i = 1; i < nr_pages; i++, p++) { + if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) + 
p = pfn_to_page(page_to_pfn(page) + i); if (unlikely(!PageTail(p) | (p->first_page != page))) diff --git a/mm/page_isolation.c b/mm/page_isolation.c index c69f84fe038d..b70a7fec1ff6 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -114,8 +114,10 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) { - unsigned long pfn; + unsigned long pfn, flags; struct page *page; + struct zone *zone; + int ret; pfn = start_pfn; /* @@ -131,7 +133,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) if (pfn < end_pfn) return -EBUSY; /* Check all pages are free or Marked as ISOLATED */ - if (__test_page_isolated_in_pageblock(start_pfn, end_pfn)) - return 0; - return -EBUSY; + zone = page_zone(pfn_to_page(pfn)); + spin_lock_irqsave(&zone->lock, flags); + ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn); + spin_unlock_irqrestore(&zone->lock, flags); + return ret ? 0 : -EBUSY; } diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c index ae532f501943..8d7a27a6335c 100644 --- a/mm/tiny-shmem.c +++ b/mm/tiny-shmem.c @@ -65,31 +65,31 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) if (!dentry) goto put_memory; + error = -ENFILE; + file = get_empty_filp(); + if (!file) + goto put_dentry; + error = -ENOSPC; inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); if (!inode) - goto put_dentry; + goto close_file; d_instantiate(dentry, inode); - error = -ENFILE; - file = alloc_file(shm_mnt, dentry, FMODE_WRITE | FMODE_READ, - &ramfs_file_operations); - if (!file) - goto put_dentry; - + inode->i_size = size; inode->i_nlink = 0; /* It is unlinked */ + init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, + &ramfs_file_operations); - /* notify everyone as to the change of file size */ - error = do_truncate(dentry, size, 0, file); - if (error < 0) +#ifndef CONFIG_MMU + error = ramfs_nommu_expand_for_mapping(inode, size); + if (error) goto close_file; - +#endif return file; close_file: put_filp(file); - return ERR_PTR(error); - put_dentry: dput(dentry); put_memory: diff --git a/net/9p/client.c b/net/9p/client.c index 2ffe40cf2f01..10e320307ec0 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -75,7 +75,6 @@ static int parse_opts(char *opts, struct p9_client *clnt) int option; int ret = 0; - clnt->trans_mod = v9fs_default_trans(); clnt->dotu = 1; clnt->msize = 8192; @@ -108,7 +107,7 @@ static int parse_opts(char *opts, struct p9_client *clnt) clnt->msize = option; break; case Opt_trans: - clnt->trans_mod = v9fs_match_trans(&args[0]); + clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); break; case Opt_legacy: clnt->dotu = 0; @@ -117,6 +116,10 @@ static int parse_opts(char *opts, struct p9_client *clnt) continue; } } + + if (!clnt->trans_mod) + clnt->trans_mod = v9fs_get_default_trans(); + kfree(options); return ret; } @@ -150,6 +153,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) if (!clnt) return ERR_PTR(-ENOMEM); + clnt->trans_mod = NULL; clnt->trans = NULL; spin_lock_init(&clnt->lock); INIT_LIST_HEAD(&clnt->fidlist); @@ -235,6 +239,8 @@ void p9_client_destroy(struct p9_client *clnt) clnt->trans = NULL; } + v9fs_put_trans(clnt->trans_mod); + list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) p9_fid_destroy(fid); diff --git a/net/9p/conv.c b/net/9p/conv.c index 44547201f5bc..5ad3a3bd73b2 100644 --- a/net/9p/conv.c +++ b/net/9p/conv.c @@ -451,8 +451,10 @@ p9_put_data(struct cbuf *bufp, const char *data, 
int count, unsigned char **pdata) { *pdata = buf_alloc(bufp, count); + if (*pdata == NULL) + return -ENOMEM; memmove(*pdata, data, count); - return count; + return 0; } static int @@ -460,6 +462,8 @@ p9_put_user_data(struct cbuf *bufp, const char __user *data, int count, unsigned char **pdata) { *pdata = buf_alloc(bufp, count); + if (*pdata == NULL) + return -ENOMEM; return copy_from_user(*pdata, data, count); } diff --git a/net/9p/mod.c b/net/9p/mod.c index bdee1fb7cc62..1084feb24cb0 100644 --- a/net/9p/mod.c +++ b/net/9p/mod.c @@ -31,6 +31,7 @@ #include <linux/parser.h> #include <net/9p/transport.h> #include <linux/list.h> +#include <linux/spinlock.h> #ifdef CONFIG_NET_9P_DEBUG unsigned int p9_debug_level = 0; /* feature-rific global debug level */ @@ -44,8 +45,8 @@ MODULE_PARM_DESC(debug, "9P debugging level"); * */ +static DEFINE_SPINLOCK(v9fs_trans_lock); static LIST_HEAD(v9fs_trans_list); -static struct p9_trans_module *v9fs_default_transport; /** * v9fs_register_trans - register a new transport with 9p @@ -54,48 +55,87 @@ static struct p9_trans_module *v9fs_default_transport; */ void v9fs_register_trans(struct p9_trans_module *m) { + spin_lock(&v9fs_trans_lock); list_add_tail(&m->list, &v9fs_trans_list); - if (m->def) - v9fs_default_transport = m; + spin_unlock(&v9fs_trans_lock); } EXPORT_SYMBOL(v9fs_register_trans); /** - * v9fs_match_trans - match transport versus registered transports + * v9fs_unregister_trans - unregister a 9p transport + * @m: the transport to remove + * + */ +void v9fs_unregister_trans(struct p9_trans_module *m) +{ + spin_lock(&v9fs_trans_lock); + list_del_init(&m->list); + spin_unlock(&v9fs_trans_lock); +} +EXPORT_SYMBOL(v9fs_unregister_trans); + +/** + * v9fs_get_trans_by_name - get transport with the matching name * @name: string identifying transport * */ -struct p9_trans_module *v9fs_match_trans(const substring_t *name) +struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name) { - struct list_head *p; - struct p9_trans_module *t = NULL; - - list_for_each(p, &v9fs_trans_list) { - t = list_entry(p, struct p9_trans_module, list); - if (strncmp(t->name, name->from, name->to-name->from) == 0) - return t; - } - return NULL; + struct p9_trans_module *t, *found = NULL; + + spin_lock(&v9fs_trans_lock); + + list_for_each_entry(t, &v9fs_trans_list, list) + if (strncmp(t->name, name->from, name->to-name->from) == 0 && + try_module_get(t->owner)) { + found = t; + break; + } + + spin_unlock(&v9fs_trans_lock); + return found; } -EXPORT_SYMBOL(v9fs_match_trans); +EXPORT_SYMBOL(v9fs_get_trans_by_name); /** - * v9fs_default_trans - returns pointer to default transport + * v9fs_get_default_trans - get the default transport * */ -struct p9_trans_module *v9fs_default_trans(void) +struct p9_trans_module *v9fs_get_default_trans(void) { - if (v9fs_default_transport) - return v9fs_default_transport; - else if (!list_empty(&v9fs_trans_list)) - return list_first_entry(&v9fs_trans_list, - struct p9_trans_module, list); - else - return NULL; + struct p9_trans_module *t, *found = NULL; + + spin_lock(&v9fs_trans_lock); + + list_for_each_entry(t, &v9fs_trans_list, list) + if (t->def && try_module_get(t->owner)) { + found = t; + break; + } + + if (!found) + list_for_each_entry(t, &v9fs_trans_list, list) + if (try_module_get(t->owner)) { + found = t; + break; + } + + spin_unlock(&v9fs_trans_lock); + return found; } -EXPORT_SYMBOL(v9fs_default_trans); +EXPORT_SYMBOL(v9fs_get_default_trans); +/** + * v9fs_put_trans - put trans + * @m: transport to put + * + */ +void 
v9fs_put_trans(struct p9_trans_module *m) +{ + if (m) + module_put(m->owner); +} /** * v9fs_init - Initialize module @@ -120,6 +160,8 @@ static int __init init_p9(void) static void __exit exit_p9(void) { printk(KERN_INFO "Unloading 9P2000 support\n"); + + p9_trans_fd_exit(); } module_init(init_p9) diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index cdf137af7adc..d652baf5ff91 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -151,7 +151,6 @@ struct p9_mux_poll_task { * @trans: reference to transport instance for this connection * @tagpool: id accounting for transactions * @err: error state - * @equeue: event wait_q (?) * @req_list: accounting for requests which have been sent * @unsent_req_list: accounting for requests that haven't been sent * @rcall: current response &p9_fcall structure @@ -178,7 +177,6 @@ struct p9_conn { struct p9_trans *trans; struct p9_idpool *tagpool; int err; - wait_queue_head_t equeue; struct list_head req_list; struct list_head unsent_req_list; struct p9_fcall *rcall; @@ -240,22 +238,6 @@ static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, static void p9_conn_cancel(struct p9_conn *m, int err); -static int p9_mux_global_init(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) - p9_mux_poll_tasks[i].task = NULL; - - p9_mux_wq = create_workqueue("v9fs"); - if (!p9_mux_wq) { - printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); - return -ENOMEM; - } - - return 0; -} - static u16 p9_mux_get_tag(struct p9_conn *m) { int tag; @@ -409,11 +391,11 @@ static void p9_mux_poll_stop(struct p9_conn *m) static struct p9_conn *p9_conn_create(struct p9_trans *trans) { int i, n; - struct p9_conn *m, *mtmp; + struct p9_conn *m; P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, trans->msize); - m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL); + m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); if (!m) return ERR_PTR(-ENOMEM); @@ -424,25 +406,14 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans) m->trans = trans; m->tagpool = p9_idpool_create(); if (IS_ERR(m->tagpool)) { - mtmp = ERR_PTR(-ENOMEM); kfree(m); - return mtmp; + return ERR_PTR(-ENOMEM); } - m->err = 0; - init_waitqueue_head(&m->equeue); INIT_LIST_HEAD(&m->req_list); INIT_LIST_HEAD(&m->unsent_req_list); - m->rcall = NULL; - m->rpos = 0; - m->rbuf = NULL; - m->wpos = m->wsize = 0; - m->wbuf = NULL; INIT_WORK(&m->rq, p9_read_work); INIT_WORK(&m->wq, p9_write_work); - m->wsched = 0; - memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); - m->poll_task = NULL; n = p9_mux_poll_start(m); if (n) { kfree(m); @@ -463,10 +434,8 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans) for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { if (IS_ERR(m->poll_waddr[i])) { p9_mux_poll_stop(m); - mtmp = (void *)m->poll_waddr; /* the error code */ kfree(m); - m = mtmp; - break; + return (void *)m->poll_waddr; /* the error code */ } } @@ -483,18 +452,13 @@ static void p9_conn_destroy(struct p9_conn *m) { P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, m->mux_list.prev, m->mux_list.next); - p9_conn_cancel(m, -ECONNRESET); - - if (!list_empty(&m->req_list)) { - /* wait until all processes waiting on this session exit */ - P9_DPRINTK(P9_DEBUG_MUX, - "mux %p waiting for empty request queue\n", m); - wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000); - P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m, - list_empty(&m->req_list)); - } p9_mux_poll_stop(m); + cancel_work_sync(&m->rq); + cancel_work_sync(&m->wq); + + p9_conn_cancel(m, 
-ECONNRESET); + m->trans = NULL; p9_idpool_destroy(m->tagpool); kfree(m); @@ -840,8 +804,6 @@ static void p9_read_work(struct work_struct *work) (*req->cb) (req, req->cba); else kfree(req->rcall); - - wake_up(&m->equeue); } } else { if (err >= 0 && rcall->id != P9_RFLUSH) @@ -908,8 +870,10 @@ static struct p9_req *p9_send_request(struct p9_conn *m, else n = p9_mux_get_tag(m); - if (n < 0) + if (n < 0) { + kfree(req); return ERR_PTR(-ENOMEM); + } p9_set_tag(tc, n); @@ -984,8 +948,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a) (*req->cb) (req, req->cba); else kfree(req->rcall); - - wake_up(&m->equeue); } kfree(freq->tcall); @@ -1191,8 +1153,6 @@ void p9_conn_cancel(struct p9_conn *m, int err) else kfree(req->rcall); } - - wake_up(&m->equeue); } /** @@ -1370,7 +1330,6 @@ p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt) { int ret, n; struct p9_trans_fd *ts = NULL; - mm_segment_t oldfs; if (trans && trans->status == Connected) ts = trans->priv; @@ -1384,24 +1343,17 @@ p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt) if (!ts->wr->f_op || !ts->wr->f_op->poll) return -EIO; - oldfs = get_fs(); - set_fs(get_ds()); - ret = ts->rd->f_op->poll(ts->rd, pt); if (ret < 0) - goto end; + return ret; if (ts->rd != ts->wr) { n = ts->wr->f_op->poll(ts->wr, pt); - if (n < 0) { - ret = n; - goto end; - } + if (n < 0) + return n; ret = (ret & ~POLLOUT) | (n & ~POLLIN); } -end: - set_fs(oldfs); return ret; } @@ -1629,6 +1581,7 @@ static struct p9_trans_module p9_tcp_trans = { .maxsize = MAX_SOCK_BUF, .def = 1, .create = p9_trans_create_tcp, + .owner = THIS_MODULE, }; static struct p9_trans_module p9_unix_trans = { @@ -1636,6 +1589,7 @@ static struct p9_trans_module p9_unix_trans = { .maxsize = MAX_SOCK_BUF, .def = 0, .create = p9_trans_create_unix, + .owner = THIS_MODULE, }; static struct p9_trans_module p9_fd_trans = { @@ -1643,14 +1597,20 @@ static struct p9_trans_module p9_fd_trans = { .maxsize = MAX_SOCK_BUF, .def = 0, .create = p9_trans_create_fd, + .owner = THIS_MODULE, }; int p9_trans_fd_init(void) { - int ret = p9_mux_global_init(); - if (ret) { - printk(KERN_WARNING "9p: starting mux failed\n"); - return ret; + int i; + + for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) + p9_mux_poll_tasks[i].task = NULL; + + p9_mux_wq = create_workqueue("v9fs"); + if (!p9_mux_wq) { + printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); + return -ENOMEM; } v9fs_register_trans(&p9_tcp_trans); @@ -1659,4 +1619,12 @@ int p9_trans_fd_init(void) return 0; } -EXPORT_SYMBOL(p9_trans_fd_init); + +void p9_trans_fd_exit(void) +{ + v9fs_unregister_trans(&p9_tcp_trans); + v9fs_unregister_trans(&p9_unix_trans); + v9fs_unregister_trans(&p9_fd_trans); + + destroy_workqueue(p9_mux_wq); +} diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 42adc052b149..94912e077a55 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -528,6 +528,7 @@ static struct p9_trans_module p9_virtio_trans = { .create = p9_virtio_create, .maxsize = PAGE_SIZE*16, .def = 0, + .owner = THIS_MODULE, }; /* The standard init function */ @@ -545,6 +546,7 @@ static int __init p9_virtio_init(void) static void __exit p9_virtio_cleanup(void) { unregister_virtio_driver(&p9_virtio_drv); + v9fs_unregister_trans(&p9_virtio_trans); } module_init(p9_virtio_init); diff --git a/net/core/dev.c b/net/core/dev.c index e719ed29310f..e8eb2b478344 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -122,6 +122,7 @@ #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/ip.h> +#include 
<net/ip.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/jhash.h> @@ -1667,7 +1668,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) { u32 addr1, addr2, ports; u32 hash, ihl; - u8 ip_proto; + u8 ip_proto = 0; if (unlikely(!simple_tx_hashrnd_initialized)) { get_random_bytes(&simple_tx_hashrnd, 4); @@ -1676,7 +1677,8 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) switch (skb->protocol) { case __constant_htons(ETH_P_IP): - ip_proto = ip_hdr(skb)->protocol; + if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) + ip_proto = ip_hdr(skb)->protocol; addr1 = ip_hdr(skb)->saddr; addr2 = ip_hdr(skb)->daddr; ihl = ip_hdr(skb)->ihl; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1b4fee20fc93..011478e46c40 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -618,7 +618,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, ]; } rep; struct ip_reply_arg arg; - struct net *net = dev_net(skb->dev); + struct net *net = dev_net(skb->dst->dev); memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof(arg)); diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 62e39ace0588..26654b26d7fa 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c @@ -97,8 +97,6 @@ hbh_mt6(const struct sk_buff *skb, const struct net_device *in, hdrlen -= 2; if (!(optinfo->flags & IP6T_OPTS_OPTS)) { return ret; - } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { - pr_debug("Not strict - not implemented"); } else { pr_debug("Strict "); pr_debug("#%d ", optinfo->optsnr); @@ -177,6 +175,12 @@ hbh_mt6_check(const char *tablename, const void *entry, pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags); return false; } + + if (optsinfo->flags & IP6T_OPTS_NSTRICT) { + pr_debug("ip6t_opts: Not strict - not implemented"); + return false; + } + return true; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9af6115f0f50..63442a1e741c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2688,6 +2688,8 @@ int __init ip6_route_init(void) if (ret) goto out_kmem_cache; + ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; + /* Registering of the loopback is done before this portion of code, * the loopback reference in rt6_info will not be taken, do it * manually for init_net */ diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b585c850a89a..10e22fd48222 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1050,7 +1050,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 struct tcphdr *th = tcp_hdr(skb), *t1; struct sk_buff *buff; struct flowi fl; - struct net *net = dev_net(skb->dev); + struct net *net = dev_net(skb->dst->dev); struct sock *ctl_sk = net->ipv6.tcp_sk; unsigned int tot_len = sizeof(struct tcphdr); __be32 *topt; diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 705959b31e24..d7b54b5bfa69 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -524,7 +524,6 @@ static int iucv_enable(void) get_online_cpus(); for_each_online_cpu(cpu) smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); - preempt_enable(); if (cpus_empty(iucv_buffer_cpumask)) /* No cpu could declare an iucv buffer. 
*/ goto out_path; @@ -547,7 +546,9 @@ out: */ static void iucv_disable(void) { + get_online_cpus(); on_each_cpu(iucv_retrieve_cpu, NULL, 1); + put_online_cpus(); kfree(iucv_path_table); } diff --git a/net/key/af_key.c b/net/key/af_key.c index d628df97e02e..b7f5a1c353ee 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -73,22 +73,18 @@ static int pfkey_can_dump(struct sock *sk) return 0; } -static int pfkey_do_dump(struct pfkey_sock *pfk) +static void pfkey_terminate_dump(struct pfkey_sock *pfk) { - int rc; - - rc = pfk->dump.dump(pfk); - if (rc == -ENOBUFS) - return 0; - - pfk->dump.done(pfk); - pfk->dump.dump = NULL; - pfk->dump.done = NULL; - return rc; + if (pfk->dump.dump) { + pfk->dump.done(pfk); + pfk->dump.dump = NULL; + pfk->dump.done = NULL; + } } static void pfkey_sock_destruct(struct sock *sk) { + pfkey_terminate_dump(pfkey_sk(sk)); skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { @@ -310,6 +306,18 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, return err; } +static int pfkey_do_dump(struct pfkey_sock *pfk) +{ + int rc; + + rc = pfk->dump.dump(pfk); + if (rc == -ENOBUFS) + return 0; + + pfkey_terminate_dump(pfk); + return rc; +} + static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig) { *new = *orig; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index b599cbba4fbe..d68869f966c3 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1012,6 +1012,29 @@ end: return retval; } +struct sctp_chunk *sctp_make_violation_paramlen( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + struct sctp_paramhdr *param) +{ + struct sctp_chunk *retval; + static const char error[] = "The following parameter had invalid length:"; + size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + + sizeof(sctp_paramhdr_t); + + retval = sctp_make_abort(asoc, chunk, payload_len); + if (!retval) + goto nodata; + + sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, + sizeof(error) + sizeof(sctp_paramhdr_t)); + sctp_addto_chunk(retval, sizeof(error), error); + sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param); + +nodata: + return retval; +} + /* Make a HEARTBEAT chunk. */ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, const struct sctp_transport *transport, @@ -1782,11 +1805,6 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc, const struct sctp_chunk *chunk, struct sctp_chunk **errp) { - static const char error[] = "The following parameter had invalid length:"; - size_t payload_len = WORD_ROUND(sizeof(error)) + - sizeof(sctp_paramhdr_t); - - /* This is a fatal error. Any accumulated non-fatal errors are * not reported. */ @@ -1794,14 +1812,7 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc, sctp_chunk_free(*errp); /* Create an error chunk and fill it in with our payload. 
*/ - *errp = sctp_make_op_error_space(asoc, chunk, payload_len); - - if (*errp) { - sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, - sizeof(error) + sizeof(sctp_paramhdr_t)); - sctp_addto_chunk(*errp, sizeof(error), error); - sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param); - } + *errp = sctp_make_violation_paramlen(asoc, chunk, param); return 0; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 8848d329aa2c..7c622af2ce55 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -119,7 +119,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, - void *arg, + void *arg, void *ext, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_violation_ctsn( @@ -3425,7 +3425,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, addr_param = (union sctp_addr_param *)hdr->params; length = ntohs(addr_param->p.length); if (length < sizeof(sctp_paramhdr_t)) - return sctp_sf_violation_paramlen(ep, asoc, type, + return sctp_sf_violation_paramlen(ep, asoc, type, arg, (void *)addr_param, commands); /* Verify the ASCONF chunk before processing it. */ @@ -3433,8 +3433,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, (sctp_paramhdr_t *)((void *)addr_param + length), (void *)chunk->chunk_end, &err_param)) - return sctp_sf_violation_paramlen(ep, asoc, type, - (void *)&err_param, commands); + return sctp_sf_violation_paramlen(ep, asoc, type, arg, + (void *)err_param, commands); /* ADDIP 5.2 E1) Compare the value of the serial number to the value * the endpoint stored in a new association variable @@ -3542,8 +3542,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, (sctp_paramhdr_t *)addip_hdr->params, (void *)asconf_ack->chunk_end, &err_param)) - return sctp_sf_violation_paramlen(ep, asoc, type, - (void *)&err_param, commands); + return sctp_sf_violation_paramlen(ep, asoc, type, arg, + (void *)err_param, commands); if (last_asconf) { addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; @@ -4240,12 +4240,38 @@ static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, - void *arg, - sctp_cmd_seq_t *commands) { - static const char err_str[] = "The following parameter had invalid length:"; + void *arg, void *ext, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_paramhdr *param = ext; + struct sctp_chunk *abort = NULL; - return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, - sizeof(err_str)); + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + goto discard; + + /* Make the abort chunk. 
*/ + abort = sctp_make_violation_paramlen(asoc, chunk, param); + if (!abort) + goto nomem; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNABORTED)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); + SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + +discard: + sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); + + SCTP_INC_STATS(SCTP_MIB_ABORTEDS); + + return SCTP_DISPOSITION_ABORT; +nomem: + return SCTP_DISPOSITION_NOMEM; } /* Handle a protocol violation when the peer trying to advance the diff --git a/net/socket.c b/net/socket.c index 8ef8ba81b9e2..3e8d4e35c08f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1511,6 +1511,7 @@ out_fd: goto out_put; } +#if 0 #ifdef HAVE_SET_RESTORE_SIGMASK asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, @@ -1564,6 +1565,7 @@ asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); } #endif +#endif asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen) diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index ac25b4c0e982..dc50f1e71f76 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -27,10 +27,14 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) - skb_headroom(skb); int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); - if (nhead > 0 || ntail > 0) - return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); - - return 0; + if (nhead <= 0) { + if (ntail <= 0) + return 0; + nhead = 0; + } else if (ntail < 0) + ntail = 0; + + return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); } static int xfrm_output_one(struct sk_buff *skb, int err) diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 36b5eedcdc75..3e1057f885c6 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -32,6 +32,7 @@ char *defconfig_file; static int indent = 1; static int valid_stdin = 1; +static int sync_kconfig; static int conf_cnt; static char line[128]; static struct menu *rootEntry; @@ -65,7 +66,7 @@ static void strip(char *str) static void check_stdin(void) { - if (!valid_stdin && input_mode == ask_silent) { + if (!valid_stdin) { printf(_("aborted!\n\n")); printf(_("Console input/output is redirected. 
")); printf(_("Run 'make oldconfig' to update configuration.\n\n")); @@ -427,43 +428,6 @@ static void check_conf(struct menu *menu) check_conf(child); } -static void conf_do_update(void) -{ - /* Update until a loop caused no more changes */ - do { - conf_cnt = 0; - check_conf(&rootmenu); - } while (conf_cnt); -} - -static int conf_silent_update(void) -{ - const char *name; - - if (conf_get_changed()) { - name = getenv("KCONFIG_NOSILENTUPDATE"); - if (name && *name) { - fprintf(stderr, - _("\n*** Kernel configuration requires explicit update.\n\n")); - return 1; - } - conf_do_update(); - } - return 0; -} - -static int conf_update(void) -{ - rootEntry = &rootmenu; - conf(&rootmenu); - if (input_mode == ask_all) { - input_mode = ask_silent; - valid_stdin = 1; - } - conf_do_update(); - return 0; -} - int main(int ac, char **av) { int opt; @@ -477,11 +441,11 @@ int main(int ac, char **av) while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) { switch (opt) { case 'o': - input_mode = ask_new; + input_mode = ask_silent; break; case 's': input_mode = ask_silent; - valid_stdin = isatty(0) && isatty(1) && isatty(2); + sync_kconfig = 1; break; case 'd': input_mode = set_default; @@ -519,6 +483,19 @@ int main(int ac, char **av) name = av[optind]; conf_parse(name); //zconfdump(stdout); + if (sync_kconfig) { + if (stat(".config", &tmpstat)) { + fprintf(stderr, _("***\n" + "*** You have not yet configured your kernel!\n" + "*** (missing kernel .config file)\n" + "***\n" + "*** Please run some configurator (e.g. \"make oldconfig\" or\n" + "*** \"make menuconfig\" or \"make xconfig\").\n" + "***\n")); + exit(1); + } + } + switch (input_mode) { case set_default: if (!defconfig_file) @@ -531,16 +508,6 @@ int main(int ac, char **av) } break; case ask_silent: - if (stat(".config", &tmpstat)) { - printf(_("***\n" - "*** You have not yet configured your kernel!\n" - "*** (missing kernel .config file)\n" - "***\n" - "*** Please run some configurator (e.g. \"make oldconfig\" or\n" - "*** \"make menuconfig\" or \"make xconfig\").\n" - "***\n")); - exit(1); - } case ask_all: case ask_new: conf_read(NULL); @@ -569,6 +536,19 @@ int main(int ac, char **av) default: break; } + + if (sync_kconfig) { + if (conf_get_changed()) { + name = getenv("KCONFIG_NOSILENTUPDATE"); + if (name && *name) { + fprintf(stderr, + _("\n*** Kernel configuration requires explicit update.\n\n")); + return 1; + } + } + valid_stdin = isatty(0) && isatty(1) && isatty(2); + } + switch (input_mode) { case set_no: conf_set_all_new_symbols(def_no); @@ -585,27 +565,38 @@ int main(int ac, char **av) case set_default: conf_set_all_new_symbols(def_default); break; - case ask_silent: case ask_new: - if (conf_silent_update()) - exit(1); - break; case ask_all: - if (conf_update()) - exit(1); + rootEntry = &rootmenu; + conf(&rootmenu); + input_mode = ask_silent; + /* fall through */ + case ask_silent: + /* Update until a loop caused no more changes */ + do { + conf_cnt = 0; + check_conf(&rootmenu); + } while (conf_cnt); break; } - if (conf_write(NULL)) { - fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); - exit(1); - } - /* ask_silent is used during the build so we shall update autoconf. - * All other commands are only used to generate a config. - */ - if (input_mode == ask_silent && conf_write_autoconf()) { - fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); - return 1; + if (sync_kconfig) { + /* silentoldconfig is used during the build so we shall update autoconf. 
+ * All other commands are only used to generate a config. + */ + if (conf_get_changed() && conf_write(NULL)) { + fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); + exit(1); + } + if (conf_write_autoconf()) { + fprintf(stderr, _("\n*** Error during update of the kernel configuration.\n\n")); + return 1; + } + } else { + if (conf_write(NULL)) { + fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); + exit(1); + } } return 0; } diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index df6a188b9930..b91cf241a539 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -222,8 +222,10 @@ load: continue; if (def == S_DEF_USER) { sym = sym_find(line + 9); - if (!sym) + if (!sym) { + sym_add_change_count(1); break; + } } else { sym = sym_lookup(line + 9, 0); if (sym->type == S_UNKNOWN) @@ -259,8 +261,10 @@ load: } if (def == S_DEF_USER) { sym = sym_find(line + 7); - if (!sym) + if (!sym) { + sym_add_change_count(1); break; + } } else { sym = sym_lookup(line + 7, 0); if (sym->type == S_UNKNOWN) diff --git a/scripts/kernel-doc b/scripts/kernel-doc index ff787e6ff8ed..44ee94d2ab76 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -781,6 +781,7 @@ sub output_struct_xml(%) { print " <refsect1>\n"; print " <title>Members</title>\n"; + if ($#{$args{'parameterlist'}} >= 0) { print " <variablelist>\n"; foreach $parameter (@{$args{'parameterlist'}}) { ($parameter =~ /^#/) && next; @@ -798,6 +799,9 @@ sub output_struct_xml(%) { print " </varlistentry>\n"; } print " </variablelist>\n"; + } else { + print " <para>\n None\n </para>\n"; + } print " </refsect1>\n"; output_section_xml(@_); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index d11a8154500f..8551952ef329 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2737,6 +2737,7 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, if (ctx == NULL) goto netlbl_secattr_to_sid_return; + context_init(&ctx_new); ctx_new.user = ctx->user; ctx_new.role = ctx->role; ctx_new.type = ctx->type; @@ -2745,13 +2746,9 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat, secattr->attr.mls.cat) != 0) goto netlbl_secattr_to_sid_return; - ctx_new.range.level[1].cat.highbit = - ctx_new.range.level[0].cat.highbit; - ctx_new.range.level[1].cat.node = - ctx_new.range.level[0].cat.node; - } else { - ebitmap_init(&ctx_new.range.level[0].cat); - ebitmap_init(&ctx_new.range.level[1].cat); + memcpy(&ctx_new.range.level[1].cat, + &ctx_new.range.level[0].cat, + sizeof(ctx_new.range.level[0].cat)); } if (mls_context_isvalid(&policydb, &ctx_new) != 1) goto netlbl_secattr_to_sid_return_cleanup; diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 9dd9bc73fe1d..ece25c718e95 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -781,7 +781,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, return -ENODEV; card = pcm->card; - down_read(&card->controls_rwsem); + read_lock(&card->ctl_files_rwlock); list_for_each_entry(kctl, &card->ctl_files, list) { if (kctl->pid == current->pid) { prefer_subdevice = kctl->prefer_pcm_subdevice; @@ -789,7 +789,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, break; } } - up_read(&card->controls_rwsem); + read_unlock(&card->ctl_files_rwlock); switch (stream) { case SNDRV_PCM_STREAM_PLAYBACK: diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 
c49b9d9e303c..c487025d3457 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1546,16 +1546,10 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream) card = substream->pcm->card; if (runtime->status->state == SNDRV_PCM_STATE_OPEN || - runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) + runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED || + runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) return -EBADFD; - snd_power_lock(card); - if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { - result = snd_power_wait(card, SNDRV_CTL_POWER_D0); - if (result < 0) - goto _unlock; - } - snd_pcm_stream_lock_irq(substream); /* resume pause */ if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) @@ -1564,8 +1558,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream) snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ snd_pcm_stream_unlock_irq(substream); - _unlock: - snd_power_unlock(card); + return result; } diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index f7ea7287c59c..b917a9f981c7 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -418,7 +418,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) mutex_lock(&rmidi->open_mutex); while (1) { subdevice = -1; - down_read(&card->controls_rwsem); + read_lock(&card->ctl_files_rwlock); list_for_each_entry(kctl, &card->ctl_files, list) { if (kctl->pid == current->pid) { subdevice = kctl->prefer_rawmidi_subdevice; @@ -426,7 +426,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) break; } } - up_read(&card->controls_rwsem); + read_unlock(&card->ctl_files_rwlock); err = snd_rawmidi_kernel_open(rmidi->card, rmidi->device, subdevice, fflags, rawmidi_file); if (err >= 0) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index ad994fcab725..f3da621f25c5 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -1683,8 +1683,8 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = { /* Dell 3 stack systems with verb table in BIOS */ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_3ST), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS), diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c index 566a6d0daf4a..106c48225bba 100644 --- a/sound/ppc/awacs.c +++ b/sound/ppc/awacs.c @@ -621,6 +621,13 @@ static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __initdata = { AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), }; +static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __initdata = { + AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), + AWACS_VOLUME("Master Playback Volume", 5, 6, 1), + AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), + AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0), +}; + static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __initdata = { AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), @@ -688,7 +695,10 @@ static struct snd_kcontrol_new 
snd_pmac_awacs_speaker_vol[] __initdata = { static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __initdata = AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1); -static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac __initdata = +static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __initdata = +AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 1); + +static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __initdata = AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0); @@ -765,11 +775,12 @@ static void snd_pmac_awacs_resume(struct snd_pmac *chip) #define IS_PM7500 (machine_is_compatible("AAPL,7500")) #define IS_BEIGE (machine_is_compatible("AAPL,Gossamer")) -#define IS_IMAC (machine_is_compatible("PowerMac2,1") \ - || machine_is_compatible("PowerMac2,2") \ +#define IS_IMAC1 (machine_is_compatible("PowerMac2,1")) +#define IS_IMAC2 (machine_is_compatible("PowerMac2,2") \ || machine_is_compatible("PowerMac4,1")) +#define IS_G4AGP (machine_is_compatible("PowerMac3,1")) -static int imac; +static int imac1, imac2; #ifdef PMAC_SUPPORT_AUTOMUTE /* @@ -815,13 +826,18 @@ static void snd_pmac_awacs_update_automute(struct snd_pmac *chip, int do_notify) { int reg = chip->awacs_reg[1] | (MASK_HDMUTE | MASK_SPKMUTE); - if (imac) { + if (imac1) { + reg &= ~MASK_SPKMUTE; + reg |= MASK_PAROUT1; + } else if (imac2) { reg &= ~MASK_SPKMUTE; reg &= ~MASK_PAROUT1; } if (snd_pmac_awacs_detect_headphone(chip)) reg &= ~MASK_HDMUTE; - else if (imac) + else if (imac1) + reg &= ~MASK_PAROUT1; + else if (imac2) reg |= MASK_PAROUT1; else reg &= ~MASK_SPKMUTE; @@ -850,9 +866,13 @@ snd_pmac_awacs_init(struct snd_pmac *chip) { int pm7500 = IS_PM7500; int beige = IS_BEIGE; + int g4agp = IS_G4AGP; + int imac; int err, vol; - imac = IS_IMAC; + imac1 = IS_IMAC1; + imac2 = IS_IMAC2; + imac = imac1 || imac2; /* looks like MASK_GAINLINE triggers something, so we set here * as start-up */ @@ -939,7 +959,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip) snd_pmac_awacs_mixers); if (err < 0) return err; - if (beige) + if (beige || g4agp) ; else if (chip->model == PMAC_SCREAMER) err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mixers2), @@ -961,13 +981,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip) err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mixers_imac), snd_pmac_screamer_mixers_imac); + else if (g4agp) + err = build_mixers(chip, + ARRAY_SIZE(snd_pmac_screamer_mixers_g4agp), + snd_pmac_screamer_mixers_g4agp); else err = build_mixers(chip, ARRAY_SIZE(snd_pmac_awacs_mixers_pmac), snd_pmac_awacs_mixers_pmac); if (err < 0) return err; - chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac) + chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac || g4agp) ? &snd_pmac_awacs_master_sw_imac : &snd_pmac_awacs_master_sw, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); @@ -1004,15 +1028,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip) snd_pmac_awacs_speaker_vol); if (err < 0) return err; - chip->speaker_sw_ctl = snd_ctl_new1(imac - ? &snd_pmac_awacs_speaker_sw_imac + chip->speaker_sw_ctl = snd_ctl_new1(imac1 + ? &snd_pmac_awacs_speaker_sw_imac1 + : imac2 + ? 
&snd_pmac_awacs_speaker_sw_imac2 : &snd_pmac_awacs_speaker_sw, chip); err = snd_ctl_add(chip->card, chip->speaker_sw_ctl); if (err < 0) return err; } - if (beige) + if (beige || g4agp) err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mic_boost_beige), snd_pmac_screamer_mic_boost_beige); diff --git a/sound/soc/at32/at32-pcm.c b/sound/soc/at32/at32-pcm.c index 435f1daf177c..c83584f989a9 100644 --- a/sound/soc/at32/at32-pcm.c +++ b/sound/soc/at32/at32-pcm.c @@ -434,7 +434,8 @@ static int at32_pcm_suspend(struct platform_device *pdev, params = prtd->params; /* Disable the PDC and save the PDC registers */ - ssc_writex(params->ssc->regs, PDC_PTCR, params->mask->pdc_disable); + ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR, + params->mask->pdc_disable); prtd->pdc_xpr_save = ssc_readx(params->ssc->regs, params->pdc->xpr); prtd->pdc_xcr_save = ssc_readx(params->ssc->regs, params->pdc->xcr); @@ -464,7 +465,7 @@ static int at32_pcm_resume(struct platform_device *pdev, ssc_writex(params->ssc->regs, params->pdc->xnpr, prtd->pdc_xnpr_save); ssc_writex(params->ssc->regs, params->pdc->xncr, prtd->pdc_xncr_save); - ssc_writex(params->ssc->regs, PDC_PTCR, params->mask->pdc_enable); + ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR, params->mask->pdc_enable); return 0; } #else /* CONFIG_PM */ diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index 9deb8c74fdfd..0bbd94501d7e 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c @@ -490,34 +490,7 @@ static int cs4270_mute(struct snd_soc_dai *dai, int mute) #endif -static int cs4270_i2c_probe(struct i2c_adapter *adap, int addr, int kind); - -/* - * Notify the driver that a new I2C bus has been found. - * - * This function is called for each I2C bus in the system. The function - * then asks the I2C subsystem to probe that bus at the addresses on which - * our device (the CS4270) could exist. If a device is found at one of - * those addresses, then our probe function (cs4270_i2c_probe) is called. - */ -static int cs4270_i2c_attach(struct i2c_adapter *adapter) -{ - return i2c_probe(adapter, &addr_data, cs4270_i2c_probe); -} - -static int cs4270_i2c_detach(struct i2c_client *client) -{ - struct snd_soc_codec *codec = i2c_get_clientdata(client); - - i2c_detach_client(client); - codec->control_data = NULL; - - kfree(codec->reg_cache); - codec->reg_cache = NULL; - - kfree(client); - return 0; -} +static int cs4270_i2c_probe(struct i2c_client *, const struct i2c_device_id *); /* A list of non-DAPM controls that the CS4270 supports */ static const struct snd_kcontrol_new cs4270_snd_controls[] = { @@ -525,14 +498,19 @@ static const struct snd_kcontrol_new cs4270_snd_controls[] = { CS4270_VOLA, CS4270_VOLB, 0, 0xFF, 1) }; +static const struct i2c_device_id cs4270_id[] = { + {"cs4270", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, cs4270_id); + static struct i2c_driver cs4270_i2c_driver = { .driver = { .name = "CS4270 I2C", .owner = THIS_MODULE, }, - .id = I2C_DRIVERID_CS4270, - .attach_adapter = cs4270_i2c_attach, - .detach_client = cs4270_i2c_detach, + .id_table = cs4270_id, + .probe = cs4270_i2c_probe, }; /* @@ -561,11 +539,11 @@ static struct snd_soc_device *cs4270_socdev; * Note: snd_soc_new_pcms() must be called before this function can be called, * because of snd_ctl_add(). 
*/ -static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) +static int cs4270_i2c_probe(struct i2c_client *i2c_client, + const struct i2c_device_id *id) { struct snd_soc_device *socdev = cs4270_socdev; struct snd_soc_codec *codec = socdev->codec; - struct i2c_client *i2c_client = NULL; int i; int ret = 0; @@ -578,12 +556,6 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) /* Note: codec_dai->codec is NULL here */ - i2c_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); - if (!i2c_client) { - printk(KERN_ERR "cs4270: could not allocate I2C client\n"); - return -ENOMEM; - } - codec->reg_cache = kzalloc(CS4270_NUMREGS, GFP_KERNEL); if (!codec->reg_cache) { printk(KERN_ERR "cs4270: could not allocate register cache\n"); @@ -591,13 +563,6 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) goto error; } - i2c_set_clientdata(i2c_client, codec); - strcpy(i2c_client->name, "CS4270"); - - i2c_client->driver = &cs4270_i2c_driver; - i2c_client->adapter = adapter; - i2c_client->addr = addr; - /* Verify that we have a CS4270 */ ret = i2c_smbus_read_byte_data(i2c_client, CS4270_CHIPID); @@ -612,18 +577,10 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) goto error; } - printk(KERN_INFO "cs4270: found device at I2C address %X\n", addr); + printk(KERN_INFO "cs4270: found device at I2C address %X\n", + i2c_client->addr); printk(KERN_INFO "cs4270: hardware revision %X\n", ret & 0xF); - /* Tell the I2C layer a new client has arrived */ - - ret = i2c_attach_client(i2c_client); - if (ret) { - printk(KERN_ERR "cs4270: could not attach codec, " - "I2C address %x, error code %i\n", addr, ret); - goto error; - } - codec->control_data = i2c_client; codec->read = cs4270_read_reg_cache; codec->write = cs4270_i2c_write; @@ -648,20 +605,17 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) goto error; } + i2c_set_clientdata(i2c_client, codec); + return 0; error: - if (codec->control_data) { - i2c_detach_client(i2c_client); - codec->control_data = NULL; - } + codec->control_data = NULL; kfree(codec->reg_cache); codec->reg_cache = NULL; codec->reg_cache_size = 0; - kfree(i2c_client); - return ret; } @@ -727,7 +681,7 @@ static int cs4270_probe(struct platform_device *pdev) ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { printk(KERN_ERR "cs4270: failed to create PCMs\n"); - return ret; + goto error_free_codec; } #ifdef USE_I2C @@ -736,8 +690,7 @@ static int cs4270_probe(struct platform_device *pdev) ret = i2c_add_driver(&cs4270_i2c_driver); if (ret) { printk(KERN_ERR "cs4270: failed to attach driver"); - snd_soc_free_pcms(socdev); - return ret; + goto error_free_pcms; } /* Did we find a CS4270 on the I2C bus? 
*/ @@ -759,10 +712,23 @@ static int cs4270_probe(struct platform_device *pdev) ret = snd_soc_register_card(socdev); if (ret < 0) { printk(KERN_ERR "cs4270: failed to register card\n"); - snd_soc_free_pcms(socdev); - return ret; + goto error_del_driver; } + return 0; + +error_del_driver: +#ifdef USE_I2C + i2c_del_driver(&cs4270_i2c_driver); + +error_free_pcms: +#endif + snd_soc_free_pcms(socdev); + +error_free_codec: + kfree(socdev->codec); + socdev->codec = NULL; + return ret; } @@ -773,8 +739,7 @@ static int cs4270_remove(struct platform_device *pdev) snd_soc_free_pcms(socdev); #ifdef USE_I2C - if (socdev->codec->control_data) - i2c_del_driver(&cs4270_i2c_driver); + i2c_del_driver(&cs4270_i2c_driver); #endif kfree(socdev->codec); diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index 5761164fe16d..e873414840c8 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c @@ -583,7 +583,7 @@ static const struct snd_soc_dapm_route audio_map[] = { /* out 4 */ {"Out4 Mux", "VREF", "VREF"}, - {"Out4 Mux", "Capture ST", "Capture ST Mixer"}, + {"Out4 Mux", "Capture ST", "Playback Mixer"}, {"Out4 Mux", "LOUT2", "LOUT2"}, {"Out 4", NULL, "Out4 Mux"}, {"OUT4", NULL, "Out 4"}, @@ -607,7 +607,7 @@ static const struct snd_soc_dapm_route audio_map[] = { /* Capture Right Mux */ {"Capture Right Mux", "PGA", "Right Capture Volume"}, {"Capture Right Mux", "Line or RXP-RXN", "Line Right Mux"}, - {"Capture Right Mux", "Sidetone", "Capture ST Mixer"}, + {"Capture Right Mux", "Sidetone", "Playback Mixer"}, /* Mono Capture mixer-mux */ {"Capture Right Mixer", "Stereo", "Capture Right Mux"}, |