author     Paul Mackerras <paulus@samba.org>    2008-12-16 06:38:58 +0300
committer  Paul Mackerras <paulus@samba.org>    2008-12-16 06:38:58 +0300
commit     1e1c568d6c66d1e2e345fd15e2a1ceafc5d7e33a (patch)
tree       0cf88547108a750d6eb910564ef5bf0ffb5ceef3 /drivers
parent     91cac623262c1c0cd298c5c648a8bd2b647c264d (diff)
parent     23e0e8afafd9ac065d81506524adf3339584044b (diff)
download   linux-1e1c568d6c66d1e2e345fd15e2a1ceafc5d7e33a.tar.xz
Merge branch 'merge' into next
Diffstat (limited to 'drivers')
70 files changed, 841 insertions(+), 694 deletions(-)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a0a178dd189c..1423b0c0cd2e 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		val->intval = battery->current_now * 1000;
-		/* if power units are mW, convert to mA by
-		   dividing by current voltage (mV/1000) */
-		if (!battery->power_unit) {
-			if (battery->voltage_now) {
-				val->intval /= battery->voltage_now;
-				val->intval *= 1000;
-			} else
-				val->intval = -1;
-		}
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
 	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 78fbec8ceda0..421b7c71e72d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,7 +153,7 @@ config SATA_PROMISE
 	  If unsure, say N.
 
 config SATA_SX4
-	tristate "Promise SATA SX4 support"
+	tristate "Promise SATA SX4 support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for Promise Serial ATA SX4.
@@ -219,8 +219,8 @@ config PATA_ACPI
 	  otherwise unsupported hardware.
 
 config PATA_ALI
-	tristate "ALi PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ALi PATA support"
+	depends on PCI
 	help
 	  This option enables support for the ALi ATA interfaces
 	  found on the many ALi chipsets.
@@ -263,7 +263,7 @@ config PATA_ATIIXP
 	  If unsure, say N.
 
 config PATA_CMD640_PCI
-	tristate "CMD640 PCI PATA support (Very Experimental)"
+	tristate "CMD640 PCI PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the CMD640 PCI IDE
@@ -291,8 +291,8 @@ config PATA_CS5520
 	  If unsure, say N.
 
 config PATA_CS5530
-	tristate "CS5530 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "CS5530 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the Cyrix/NatSemi/AMD CS5530
 	  companion chip used with the MediaGX/Geode processor family.
@@ -309,8 +309,8 @@ config PATA_CS5535
 	  If unsure, say N.
 
 config PATA_CS5536
-	tristate "CS5536 PATA support (Experimental)"
-	depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+	tristate "CS5536 PATA support"
+	depends on PCI && X86 && !X86_64
 	help
 	  This option enables support for the AMD CS5536
 	  companion chip used with the Geode LX processor family.
@@ -363,7 +363,7 @@ config PATA_HPT37X
 	  If unsure, say N.
 
 config PATA_HPT3X2N
-	tristate "HPT 372N/302N PATA support (Very Experimental)"
+	tristate "HPT 372N/302N PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the N variant HPT PATA
@@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
 	  problems with DMA on this chipset.
 
 config PATA_ISAPNP
-	tristate "ISA Plug and Play PATA support (Experimental)"
-	depends on EXPERIMENTAL && ISAPNP
+	tristate "ISA Plug and Play PATA support"
+	depends on ISAPNP
 	help
 	  This option enables support for ISA plug & play ATA
 	  controllers such as those found on old soundcards.
@@ -498,8 +498,8 @@ config PATA_NINJA32
 	  If unsure, say N.
 
 config PATA_NS87410
-	tristate "Nat Semi NS87410 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Nat Semi NS87410 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the National Semiconductor
 	  NS87410 PCI-IDE controller.
@@ -507,8 +507,8 @@ config PATA_NS87410
 	  If unsure, say N.
 
 config PATA_NS87415
-	tristate "Nat Semi NS87415 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Nat Semi NS87415 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the National Semiconductor
 	  NS87415 PCI-IDE controller.
@@ -544,8 +544,8 @@ config PATA_PCMCIA
 	  If unsure, say N.
 
 config PATA_PDC_OLD
-	tristate "Older Promise PATA controller support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Older Promise PATA controller support"
+	depends on PCI
 	help
 	  This option enables support for the Promise 20246, 20262, 20263,
 	  20265 and 20267 adapters.
@@ -559,7 +559,7 @@ config PATA_QDI
 	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
 
 config PATA_RADISYS
-	tristate "RADISYS 82600 PATA support (Very Experimental)"
+	tristate "RADISYS 82600 PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the RADISYS 82600
@@ -586,8 +586,8 @@ config PATA_RZ1000
 	  If unsure, say N.
 
 config PATA_SC1200
-	tristate "SC1200 PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "SC1200 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the NatSemi/AMD SC1200 SoC
 	  companion chip used with the Geode processor family.
@@ -620,8 +620,8 @@ config PATA_SIL680
 	  If unsure, say N.
 
 config PATA_SIS
-	tristate "SiS PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "SiS PATA support"
+	depends on PCI
 	help
 	  This option enables support for SiS PATA controllers
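A note on the acpi/battery.c hunk above: the deleted block converted a rate reported in power units (mW) into a current by dividing by the present voltage. A minimal sketch of that arithmetic, assuming the ACPI convention that power_unit == 0 means milliwatts (the helper name is hypothetical, not driver code):

```c
/* Hypothetical helper restating the deleted conversion:
 * rate in mW, voltage in mV, result in uA (or -1 if voltage unknown). */
static int rate_mw_to_microamps(int rate_mw, int voltage_mv)
{
	int val = rate_mw * 1000;		/* mW -> uW */

	if (!voltage_mv)
		return -1;			/* cannot divide by zero */
	return val / voltage_mv * 1000;		/* uW / mV = mA, then -> uA */
}
```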
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d6d97d8f3fa4..c11936e13dd3 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
 	 * matching is necessary because dmi_system_id.matches is
 	 * limited to four entries.
	 */
-	if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
+	if (dmi_get_system_info(DMI_SYS_VENDOR) &&
+	    dmi_get_system_info(DMI_PRODUCT_NAME) &&
+	    dmi_get_system_info(DMI_PRODUCT_VERSION) &&
+	    dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
+	    dmi_get_system_info(DMI_BOARD_VENDOR) &&
+	    dmi_get_system_info(DMI_BOARD_NAME) &&
+	    dmi_get_system_info(DMI_BOARD_VERSION) &&
+	    !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
 	    !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
 	    !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
 	    !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index f2b83eabc7c7..a098ba8eaab6 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -382,10 +382,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	/* PCI clocking determines the ATA timing values to use */
 	/* info_hpt366 is safe against re-entry so we can scribble on it */
 	switch((reg1 & 0x700) >> 8) {
-	case 5:
+	case 9:
 		hpriv = &hpt366_40;
 		break;
-	case 9:
+	case 5:
 		hpriv = &hpt366_25;
 		break;
 	default:
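The ata_piix.c change above exists because dmi_get_system_info() returns NULL for fields the firmware does not populate, and passing NULL to strcmp() is undefined behavior. A sketch of the same guard factored into a helper (the helper is illustrative, not part of the driver):

```c
/* Illustrative only: compare a DMI field safely against a value,
 * treating an absent field as a non-match instead of crashing. */
static int dmi_field_matches(int field, const char *value)
{
	const char *info = dmi_get_system_info(field);

	return info && !strcmp(info, value);
}
```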
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 4e466eae8b46..4dd9a3b031e4 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -44,7 +44,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_ninja32"
-#define DRV_VERSION "0.1.1"
+#define DRV_VERSION "0.1.3"
 
 /**
@@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		return rc;
 	pci_set_master(dev);
 
-	/* Set up the register mappings */
+	/* Set up the register mappings. We use the I/O mapping as only the
+	   older chips also have MMIO on BAR 1 */
 	base = host->iomap[0];
 	if (!base)
 		return -ENOMEM;
@@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
 #endif
 
 static const struct pci_device_id ninja32[] = {
+	{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ },
 };
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index d34236611752..e4be55e047f6 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
 	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
 	{ 0x5513, 0x1734, 0x105F },	/* FSC Amilo A1630 */
 	{ 0x5513, 0x1071, 0x8640 },	/* EasyNote K5305 */
-	{ 0x5513, 0x1039, 0x5513 },	/* Targa Visionary 1000 */
 	/* end marker */
 	{ 0, }
 };
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f20bf359b84f..dc7a8c352da2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
 {
 	if (class_pktcdvd) {
-		pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL,
+		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
 					"%s", pd->name);
 		if (IS_ERR(pd->dev))
 			pd->dev = NULL;
@@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	return 0;
 
 out_mem:
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
 	return ret;
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
 	pkt_debugfs_dev_remove(pd);
 	pkt_sysfs_dev_remove(pd);
 
-	blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
 
 	remove_proc_entry(pd->name, pkt_proc);
 	DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
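The pktcdvd.c hunks pair blkdev_put() with the mode that was actually used to open the underlying device. A sketch of the pairing rule, assuming the device was opened with FMODE_READ | FMODE_NDELAY as this driver does (API names as of this kernel era):

```c
/* Sketch: the mode passed to blkdev_put() must mirror the open mode. */
struct block_device *bdev;

bdev = open_by_devnum(dev, FMODE_READ | FMODE_NDELAY);
if (IS_ERR(bdev))
	return PTR_ERR(bdev);
/* ... use the device ... */
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);	/* same flags as the open */
```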
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d16b02423d61..7d2e91cccb13 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2081,10 +2081,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	if (!q)
 		return -ENXIO;
 
-	rq = blk_get_request(q, READ, GFP_KERNEL);
-	if (!rq)
-		return -ENOMEM;
-
 	cdi->last_sense = 0;
 
 	while (nframes) {
@@ -2096,9 +2092,17 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
 		len = nr * CD_FRAMESIZE_RAW;
 
+		rq = blk_get_request(q, READ, GFP_KERNEL);
+		if (!rq) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
-		if (ret)
+		if (ret) {
+			blk_put_request(rq);
 			break;
+		}
 
 		rq->cmd[0] = GPCMD_READ_CD;
 		rq->cmd[1] = 1 << 2;
@@ -2124,6 +2128,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
 		if (blk_rq_unmap_user(bio))
 			ret = -EFAULT;
+		blk_put_request(rq);
 
 		if (ret)
 			break;
@@ -2133,7 +2138,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		ubuf += len;
 	}
 
-	blk_put_request(rq);
 	return ret;
 }
diff --git a/drivers/char/cp437.uni b/drivers/char/cp437.uni
index 1f06889a96b9..bc6163484f62 100644
--- a/drivers/char/cp437.uni
+++ b/drivers/char/cp437.uni
@@ -27,7 +27,7 @@
 0x0c	U+2640
 0x0d	U+266a
 0x0e	U+266b
-0x0f	U+263c
+0x0f	U+263c U+00a4
 0x10	U+25b6 U+25ba
 0x11	U+25c0 U+25c4
 0x12	U+2195
@@ -55,7 +55,7 @@
 0x24	U+0024
 0x25	U+0025
 0x26	U+0026
-0x27	U+0027
+0x27	U+0027 U+00b4
 0x28	U+0028
 0x29	U+0029
 0x2a	U+002a
@@ -84,7 +84,7 @@
 0x41	U+0041 U+00c0 U+00c1 U+00c2 U+00c3
 0x42	U+0042
 0x43	U+0043 U+00a9
-0x44	U+0044
+0x44	U+0044 U+00d0
 0x45	U+0045 U+00c8 U+00ca U+00cb
 0x46	U+0046
 0x47	U+0047
@@ -140,7 +140,7 @@
 0x79	U+0079 U+00fd
 0x7a	U+007a
 0x7b	U+007b
-0x7c	U+007c U+00a5
+0x7c	U+007c U+00a6
 0x7d	U+007d
 0x7e	U+007e
 #
@@ -263,10 +263,10 @@
 0xe8	U+03a6 U+00d8
 0xe9	U+0398
 0xea	U+03a9 U+2126
-0xeb	U+03b4
+0xeb	U+03b4 U+00f0
 0xec	U+221e
 0xed	U+03c6 U+00f8
-0xee	U+03b5
+0xee	U+03b5 U+2208
 0xef	U+2229
 0xf0	U+2261
 0xf1	U+00b1
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3b23270eaa65..a8f15e6be594 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
 						    TTY_OVERRUN);
 					/*
 					   If the flip buffer itself is
-					   overflowing, we still loose
+					   overflowing, we still lose
 					   the next incoming character.
 					 */
 					if (tty_buffer_request_room(tty, 1) !=
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index a5af6072e2b3..008176edbd64 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2274,7 +2274,7 @@ rescan_last_byte:
 				continue; /* nothing to display */
 			}
 			/* Glyph not found */
-			if (!(vc->vc_utf && !vc->vc_disp_ctrl) && !(c & ~charmask)) {
+			if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
 				/* In legacy mode use the glyph we get by a 1:1 mapping.
 				   This would make absolutely no sense with Unicode in mind,
 				   but do this for ASCII characters since a font may lack
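The cdrom.c rework above moves request allocation inside the read loop, so each chunk gets a fresh struct request and a failed user-mapping no longer leaks one. The lifecycle it establishes, sketched with the command setup elided:

```c
/* Per-chunk request lifecycle after the change (sketch, details elided): */
while (nframes) {
	rq = blk_get_request(q, READ, GFP_KERNEL);	/* fresh request */
	if (!rq)
		break;					/* report -ENOMEM */
	if (blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL)) {
		blk_put_request(rq);			/* no leak on failure */
		break;
	}
	/* ... fill rq->cmd[] with GPCMD_READ_CD, execute, unmap ... */
	blk_put_request(rq);				/* never reused */
}
```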
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 46610b090415..ab9c01e462ef 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -974,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 			packet->ack = RCODE_SEND_ERROR;
 			return -1;
 		}
+		packet->payload_bus = payload_bus;
 
 		d[2].req_count    = cpu_to_le16(packet->payload_length);
 		d[2].data_address = cpu_to_le32(payload_bus);
@@ -1025,7 +1026,6 @@ static int handle_at_packet(struct context *context,
 	struct driver_data *driver_data;
 	struct fw_packet *packet;
 	struct fw_ohci *ohci = context->ohci;
-	dma_addr_t payload_bus;
 	int evt;
 
 	if (last->transfer_status == 0)
@@ -1038,9 +1038,8 @@ static int handle_at_packet(struct context *context,
 		/* This packet was cancelled, just continue. */
 		return 1;
 
-	payload_bus = le32_to_cpu(last->data_address);
-	if (payload_bus != 0)
-		dma_unmap_single(ohci->card.device, payload_bus,
+	if (packet->payload_bus)
+		dma_unmap_single(ohci->card.device, packet->payload_bus,
 				 packet->payload_length, DMA_TO_DEVICE);
 
 	evt = le16_to_cpu(last->transfer_status) & 0x1f;
@@ -1697,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
 	if (packet->ack != 0)
 		goto out;
 
+	if (packet->payload_bus)
+		dma_unmap_single(ohci->card.device, packet->payload_bus,
+				 packet->payload_length, DMA_TO_DEVICE);
+
 	log_ar_at_event('T', packet->speed, packet->header, 0x20);
 	driver_data->packet = NULL;
 	packet->ack = RCODE_CANCELLED;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 022ac4fabb67..2884f876397b 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 	packet->speed = speed;
 	packet->generation = generation;
 	packet->ack = 0;
+	packet->payload_bus = 0;
 }
 
 /**
@@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
 		BUG();
 		return;
 	}
+
+	response->payload_bus = 0;
 }
 EXPORT_SYMBOL(fw_fill_response);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index aed7dbb17cda..839466f0a795 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -27,6 +27,7 @@
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
 #include <linux/timer.h>
+#include <linux/types.h>
 #include <linux/workqueue.h>
 
 #define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
@@ -153,6 +154,7 @@ struct fw_packet {
 	size_t header_length;
 	void *payload;
 	size_t payload_length;
+	dma_addr_t payload_bus;
 	u32 timestamp;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba89b42f790a..553dd4bc3075 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -847,9 +847,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * and the registers being closely associated.
 	 *
 	 * According to chipset errata, on the 965GM, MSI interrupts may
-	 * be lost or delayed
+	 * be lost or delayed, but we use them anyways to avoid
+	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
 	intel_opregion_init(dev);
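The firewire changes above add a payload_bus member to struct fw_packet so the DMA mapping travels with the packet; both the normal completion path and the new cancel path can then find and release it. The pairing the field enables, sketched (card_dev stands in for ohci->card.device):

```c
/* Sketch of the map/unmap pairing; error checking elided. */
packet->payload_bus = dma_map_single(card_dev, packet->payload,
				     packet->payload_length, DMA_TO_DEVICE);
/* ... later, on completion *or* on cancellation ... */
if (packet->payload_bus)
	dma_unmap_single(card_dev, packet->payload_bus,
			 packet->payload_length, DMA_TO_DEVICE);
```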
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0ec..adc972cc6bfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
 	 * still have a write_domain which needs to be flushed before
 	 * unbinding.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
 	 * freed, and we'll pull it off the list in the free path.
@@ -371,8 +379,8 @@ struct drm_i915_gem_object {
 	uint32_t agp_type;
 
 	/**
-	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
+	 * flags which individual pages are valid.
 	 */
 	uint8_t *page_cpu_valid;
 };
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
-
 	struct list_head list;
 };
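The i915_dma.c hunk just before this narrows the MSI blacklist: only 945G/945GM stay on legacy interrupts, and 965GM now uses MSI despite its errata, trading occasionally lost or delayed MSIs for fewer stuck legacy interrupts. The resulting gate is simply:

```c
/* After the change: enable MSI unless the chipset is 945G/945GM. */
if (!IS_I945G(dev) && !IS_I945GM(dev))
	pci_enable_msi(dev->pdev);
```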
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..ad672d854828 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+						     uint64_t offset,
+						     uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-					       I915_GEM_DOMAIN_CPU, 0);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
 		goto fail;
 
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
 */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
 	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-		 obj, obj->size, args->read_domains, args->write_domain);
+		 obj, obj->size, read_domains, write_domain);
 #endif
-	ret = i915_gem_set_domain(obj, file_priv,
-				  args->read_domains, args->write_domain);
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-	}
+	if (obj_priv->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list, &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+
+	}
+
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
 
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-		}
-	}
-
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-	}
 	}
 }
 
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret;
 
-	/* If there are writes queued to the buffer, flush and
-	 * create a new seqno to wait for.
+	/* This function only exists to support waiting for existing rendering,
+	 * not for emitting required flushes.
 	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
-		DRM_INFO("%s: flushing object %p from write domain %08x\n",
-			  __func__, obj, write_domain);
-#endif
-		i915_gem_flush(dev, 0, write_domain);
-
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
-		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
-	}
+	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* Wait for any rendering to complete
-	 */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret) {
-		DRM_ERROR("wait_rendering failed: %d\n", ret);
-		return ret;
-	}
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
 	 * also ensure that all pending GPU writes are finished
 	 * before we unbind.
 	 */
-	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-					 I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
-		DRM_ERROR("set_domain failed: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("set_domain failed: %d\n", ret);
 		return ret;
 	}
 
@@ -1083,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
 }
 
 static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	int ret;
+
+	for (;;) {
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0)
+			break;
+	}
+	if (ret == -ENOMEM)
+		return 0;
+	return ret;
+}
+
+static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1168,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
 		ret = i915_gem_evict_something(dev);
 		if (ret != 0) {
-			DRM_ERROR("Failed to evict a buffer %d\n", ret);
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to evict a buffer %d\n", ret);
 			return ret;
 		}
 		goto search_free;
@@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	uint32_t seqno;
+
+	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+		return;
+
+	/* Queue the GPU write cache flushing we need. */
+	i915_gem_flush(dev, 0, obj->write_domain);
+	seqno = i915_add_request(dev, obj->write_domain);
+	obj->write_domain = 0;
+	i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 */
+	obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	i915_gem_clflush_object(obj);
+	drm_agp_chipset_flush(dev);
+	obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	/* If we're writing through the GTT domain, then CPU and GPU caches
+	 * will need to be invalidated at next use.
+	 */
+	if (write)
+		obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
+		obj_priv->dirty = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we have a partially-valid cache of the object in the CPU,
+	 * finish invalidating it and free the per-page flags.
+	 */
+	i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+
+		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
  * may not actually perform the necessary flushing/invaliding though,
@@ -1339,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
  *		MI_FLUSH
  *		drm_agp_chipset_flush
 */
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
-	int ret;
+
+	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
 			 __func__, flush_domains, invalidate_domains);
 #endif
-		/*
-		 * If we're invaliding the CPU cache and flushing a GPU cache,
-		 * then pause for rendering so that the GPU caches will be
-		 * flushed before the cpu cache is invalidated
-		 */
-		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
-				       I915_GEM_DOMAIN_GTT))) {
-			ret = i915_gem_object_wait_rendering(obj);
-			if (ret)
-				return ret;
-		}
 		i915_gem_clflush_object(obj);
 	}
 
 	if ((write_domain | flush_domains) != 0)
 		obj->write_domain = write_domain;
-
-	/* If we're invalidating the CPU domain, clear the per-page CPU
-	 * domain list as well.
-	 */
-	if (obj_priv->page_cpu_valid != NULL &&
-	    (write_domain != 0 ||
-	     read_domains & I915_GEM_DOMAIN_CPU)) {
-		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-			 DRM_MEM_DRIVER);
-		obj_priv->page_cpu_valid = NULL;
-	}
 	obj->read_domains = read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		 obj->read_domains, obj->write_domain,
 		 dev->invalidate_domains, dev->flush_domains);
 #endif
-	return 0;
 }
 
 /**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
 *
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int ret, i;
 
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
-		return 0;
+	if (!obj_priv->page_cpu_valid)
+		return;
 
-	if (read_domains != I915_GEM_DOMAIN_CPU ||
-	    write_domain != 0)
-		return i915_gem_object_set_domain(obj,
-						  read_domains, write_domain);
+	/* If we're partially in the CPU read domain, finish moving it in.
+	 */
+	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+		int i;
 
-	/* Wait on any GPU rendering to the object to be flushed. */
+		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+			if (obj_priv->page_cpu_valid[i])
+				continue;
+			drm_clflush_pages(obj_priv->page_list + i, 1);
+		}
+		drm_agp_chipset_flush(dev);
+	}
+
+	/* Free the page_cpu_valid mappings which are now stale, whether
+	 * or not we've got I915_GEM_DOMAIN_CPU.
+	 */
+	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+		 DRM_MEM_DRIVER);
+	obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid.  The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+					  uint64_t offset, uint64_t size)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+
+	if (offset == 0 && size == obj->size)
+		return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
+	if (ret != 0)
 		return ret;
+	i915_gem_object_flush_gtt_write_domain(obj);
 
+	/* If we're already fully in the CPU read domain, we're done. */
+	if (obj_priv->page_cpu_valid == NULL &&
+	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+		return 0;
+
+	/* Otherwise, create/clear the per-page CPU read domain flag if we're
+	 * newly adding I915_GEM_DOMAIN_CPU
+	 */
 	if (obj_priv->page_cpu_valid == NULL) {
 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
 						      DRM_MEM_DRIVER);
-	}
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+	     i++) {
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
@@ -1472,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 		obj_priv->page_cpu_valid[i] = 1;
 	}
 
-	return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
 	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	return flush_domains;
+	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+	return 0;
 }
 
 /**
@@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			return -EINVAL;
 		}
 
+		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+			DRM_ERROR("reloc with read/write CPU domains: "
+				  "obj %p target %d offset %d "
+				  "read %08x write %08x",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.read_domains,
+				  reloc.write_domain);
+			return -EINVAL;
+		}
+
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
 			DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			continue;
 		}
 
-		/* Now that we're going to actually write some data in,
-		 * make sure that any rendering using this buffer's contents
-		 * is completed.
-		 */
-		i915_gem_object_wait_rendering(obj);
-
-		/* As we're writing through the gtt, flush
-		 * any CPU writes before we write the relocations
-		 */
-		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			drm_agp_chipset_flush(dev);
-			obj->write_domain = 0;
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
 		}
 
 		/* Map the page containing the relocation we're going to
@@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains;
+	int pin_tries;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
-	/* Look up object handles and perform the relocations */
+	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			ret = -EBADF;
 			goto err;
 		}
+	}
 
-		object_list[i]->pending_read_domains = 0;
-		object_list[i]->pending_write_domain = 0;
-		ret = i915_gem_object_pin_and_relocate(object_list[i],
-						       file_priv,
-						       &exec_list[i]);
-		if (ret) {
-			DRM_ERROR("object bind and relocate failed %d\n", ret);
+	/* Pin and relocate */
+	for (pin_tries = 0; ; pin_tries++) {
+		ret = 0;
+		for (i = 0; i < args->buffer_count; i++) {
+			object_list[i]->pending_read_domains = 0;
+			object_list[i]->pending_write_domain = 0;
+			ret = i915_gem_object_pin_and_relocate(object_list[i],
+							       file_priv,
+							       &exec_list[i]);
+			if (ret)
+				break;
+			pinned = i + 1;
+		}
+		/* success */
+		if (ret == 0)
+			break;
+
+		/* error other than GTT full, or we've already tried again */
+		if (ret != -ENOMEM || pin_tries >= 1) {
+			DRM_ERROR("Failed to pin buffers %d\n", ret);
 			goto err;
 		}
-		pinned = i + 1;
+
+		/* unpin all of our buffers */
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		/* evict everyone we can from the aperture */
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			goto err;
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
-		/* make sure all previous memory operations have passed */
-		ret = i915_gem_object_set_domain(obj,
-						 obj->pending_read_domains,
-						 obj->pending_write_domain);
-		if (ret)
-			goto err;
+		/* Compute new gpu domains and update invalidate/flush */
+		i915_gem_object_set_to_gpu_domain(obj,
+						  obj->pending_read_domains,
+						  obj->pending_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			      ~0);
 #endif
 
-	(void)i915_add_request(dev, flush_domains);
-
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {
@@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	/* XXX - flush the CPU caches for pinned objects
 	 * as the X server doesn't manage domains yet
 	 */
-	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-		obj->write_domain = 0;
-	}
+	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = obj_priv->gtt_offset;
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain)
-{
-	struct drm_device *dev = obj->dev;
-	int ret;
-	uint32_t flush_domains;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-	if (ret)
-		return ret;
-	flush_domains = i915_gem_dev_set_domain(obj->dev);
-
-	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
-		(void) i915_add_request(dev, flush_domains);
-
-	return 0;
-}
-
 /** Unbinds all objects that are on the given buffer list. */
 static int
 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 			   dcc & DCC_CHANNEL_XOR_DISABLE) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		} else if (IS_I965GM(dev) || IS_GM45(dev)) {
-			/* GM965 only does bit 11-based channel
-			 * randomization
+		} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+			   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+			/* GM965/GM45 does either bit 11 or bit 17
+			 * swizzling.
 			 */
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e6..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
 #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
 #define DCC_ADDRESSING_MODE_MASK			(3 << 0)
 #define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
 
 /** 965 MCH register controlling DRAM channel configuration */
 #define C0DRB3			0x10206
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 7a183789be97..3bbb871b25d5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
 	atomic_t swi_emitted;
 	int vblank_crtc;
 	uint32_t irq_enable_reg;
-	int irq_enabled;
 	uint32_t r500_disp_irq_reg;
 
 	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 97c0599fdb1e..99be11418ac2 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
 	else
 		dev_priv->irq_enable_reg &= ~mask;
 
-	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	if (dev->irq_enabled)
+		RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
 }
 
 static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
 	else
 		dev_priv->r500_disp_irq_reg &= ~mask;
 
-	RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+	if (dev->irq_enabled)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
 }
 
 int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -355,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
-	dev_priv->irq_enabled = 0;
-
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
 		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	/* Disable *all* interrupts */
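The large i915_gem.c refactor above replaces the single set_domain entry point with per-target domain movers. Every mover now follows the same skeleton, sketched here with the domain-specific bookkeeping elided (the function names are the ones introduced by the diff):

```c
/* Common skeleton shared by the new set_to_{gtt,cpu}_domain() movers:
 * flush any pending GPU writes, wait for rendering to retire, then
 * retarget the object's read/write domains. */
i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj);	/* may block or -ERESTARTSYS */
if (ret != 0)
	return ret;
/* ... flush the old write domain, update obj->read_domains and
 * obj->write_domain for the new target ... */
```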
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index f4d22ae9d294..e5a8dae4a289 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -92,7 +92,7 @@ static void highlander_i2c_setup(struct highlander_i2c_dev *dev)
 static void smbus_write_data(u8 *src, u16 *dst, int len)
 {
 	for (; len > 1; len -= 2) {
-		*dst++ = be16_to_cpup((u16 *)src);
+		*dst++ = be16_to_cpup((__be16 *)src);
 		src += 2;
 	}
 
@@ -103,7 +103,7 @@ static void smbus_write_data(u8 *src, u16 *dst, int len)
 static void smbus_read_data(u16 *src, u8 *dst, int len)
 {
 	for (; len > 1; len -= 2) {
-		*(u16 *)dst = cpu_to_be16p(src++);
+		*(__be16 *)dst = cpu_to_be16p(src++);
 		dst += 2;
 	}
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dcf2045b5222..0bdb2d7f0570 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -486,7 +486,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
 
 	if (cmd->type == MSP_TWI_CMD_WRITE ||
 	    cmd->type == MSP_TWI_CMD_WRITE_READ) {
-		__be64 tmp = cpu_to_be64p((u64 *)cmd->write_data);
+		u64 tmp = be64_to_cpup((__be64 *)cmd->write_data);
 		tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
 		dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
 		pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 6d7401772a8f..e6857e01d1ba 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
 
 endif
 
+# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
 config BLK_DEV_IDE_PMAC
 	tristate "PowerMac on-board IDE support"
 	depends on PPC_PMAC && IDE=y
 	select IDE_TIMINGS
+	select BLK_DEV_IDEDMA_PCI
 	help
 	  This driver provides support for the on-board IDE controller on
 	  most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
 	  CD-ROM on hda. This option changes this to more natural hda for
 	  hard disk and hdc for CD-ROM.
 
-config BLK_DEV_IDEDMA_PMAC
-	bool "PowerMac IDE DMA support"
-	depends on BLK_DEV_IDE_PMAC
-	select BLK_DEV_IDEDMA_PCI
-	help
-	  This option allows the driver for the on-board IDE controller on
-	  Power Macintoshes and PowerBooks to use DMA (direct memory access)
-	  to transfer data to and from memory.  Saying Y is safe and improves
-	  performance.
-
 config BLK_DEV_IDE_AU1XXX
 	bool "IDE for AMD Alchemy Au1200"
 	depends on SOC_AU1200
@@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
 endif
 
 config BLK_DEV_IDEDMA
-	def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
+	def_bool BLK_DEV_IDEDMA_SFF || \
 		 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 
 endif # IDE
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7d275b2af3eb..cc35d6dbd410 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -208,8 +208,10 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 		 */
 		if (drive->hwif->dma_ops == NULL)
 			break;
-		if (drive->dev_flags & IDE_DFLAG_USING_DMA)
-			ide_set_dma(drive);
+		/*
+		 * TODO: respect IDE_DFLAG_USING_DMA
+		 */
+		ide_set_dma(drive);
 		break;
 	}
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 2e19d6298536..7c481bb56fab 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
 	struct macio_dev		*mdev;
 	u32				timings[4];
 	volatile u32 __iomem *		*kauai_fcr;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	/* Those fields are duplicating what is in hwif. We currently
 	 * can't use the hwif ones because of some assumptions that are
 	 * beeing done by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
 	 */
 	volatile struct dbdma_regs __iomem *	dma_regs;
 	struct dbdma_cmd*			dma_table_cpu;
-#endif
-
 } pmac_ide_hwif_t;
 
 enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
 #define KAUAI_FCR_UATA_RESET_N		0x00000002
 #define KAUAI_FCR_UATA_ENABLE		0x00000001
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /* Rounded Multiword DMA timings
 *
 * I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
 static void pmac_ide_selectproc(ide_drive_t *drive);
 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
 
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-
 #define PMAC_IDE_REG(x) \
 	((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
 
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	pmac_ide_do_update_timings(drive);
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /*
 * Calculate KeyLargo ATA/66 UDMA timings
 */
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
 		drive->name, speed & 0xf,  *timings);
 #endif
 }
-#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
 
 static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 {
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 	tl[0] = *timings;
 	tl[1] = *timings2;
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	if (speed >= XFER_UDMA_0) {
 		if (pmif->kind == controller_kl_ata4)
 			ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 			ret = -1;
 	} else
 		set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
 	if (ret)
 		return;
 
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
 	.chipset		= ide_pmac,
 	.tp_ops			= &pmac_tp_ops,
 	.port_ops		= &pmac_ide_port_ops,
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	.dma_ops		= &pmac_dma_ops,
-#endif
 	.host_flags		= IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
 				  IDE_HFLAG_POST_SET_MODE |
 				  IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 	pmif->regbase = regbase;
 	pmif->irq = irq;
 	pmif->kauai_fcr = NULL;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+
 	if (macio_resource_count(mdev) >= 2) {
 		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
 			printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 			pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
 	} else
 		pmif->dma_regs = NULL;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
 	dev_set_drvdata(&mdev->ofdev.dev, pmif);
 
 	memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	base = ioremap(rbase, rlen);
 	pmif->regbase = (unsigned long) base + 0x2000;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	pmif->dma_regs = base + 0x1000;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 	pmif->kauai_fcr = base;
 	pmif->irq = pdev->irq;
 
@@ -1434,8 +1419,6 @@ out:
 	return error;
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
 	return 0;
 }
 
-#else
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
-				       const struct ide_port_info *d)
-{
-	return -EOPNOTSUPP;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 
 module_init(pmac_ide_probe);
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index 7defa0ae2014..a687a7dfea6f 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -550,7 +550,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
 	.dma_timeout		= ide_dma_timeout,
 };
 
-static const struct ide_port_info sgiioc4_port_info __devinitdata = {
+static const struct ide_port_info sgiioc4_port_info __devinitconst = {
 	.name			= DRV_NAME,
 	.chipset		= ide_pci,
 	.init_dma		= ide_dma_sgiioc4,
@@ -633,7 +633,7 @@ out:
 	return ret;
 }
 
-int
+int __devinit
 ioc4_ide_attach_one(struct ioc4_driver_data *idd)
 {
 	/* PCI-RT does not bring out IDE connection.
@@ -645,7 +645,7 @@ ioc4_ide_attach_one(struct ioc4_driver_data *idd)
 	return pci_init_sgiioc4(idd->idd_pdev);
 }
 
-static struct ioc4_submodule ioc4_ide_submodule = {
+static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
 	.is_name = "IOC4_ide",
 	.is_owner = THIS_MODULE,
 	.is_probe = ioc4_ide_attach_one,
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 9e39f73282ee..d333ae22459c 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1685,6 +1685,7 @@ static int nodemgr_host_thread(void *data)
 		g = get_hpsb_generation(host);
 		for (i = 0; i < 4 ; i++) {
 			msleep_interruptible(63);
+			try_to_freeze();
 			if (kthread_should_stop())
 				goto exit;
 
@@ -1725,6 +1726,7 @@ static int nodemgr_host_thread(void *data)
 		/* Sleep 3 seconds */
 		for (i = 3000/200; i; i--) {
 			msleep_interruptible(200);
+			try_to_freeze();
 			if (kthread_should_stop())
 				goto exit;
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 1e288eeb5e2a..6461a32bc838 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -233,10 +233,8 @@ static void __exit b1isa_exit(void)
 	int i;
 
 	for (i = 0; i < MAX_CARDS; i++) {
-		if (!io[i])
-			break;
-
-		b1isa_remove(&isa_dev[i]);
+		if (isa_dev[i].resource[0].start)
+			b1isa_remove(&isa_dev[i]);
 	}
 	unregister_capi_driver(&capi_driver_b1isa);
 }
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index cfa8fa5e44ab..3f2a0a20c19b 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -83,12 +83,12 @@ net_open(struct net_device *dev)
 
 	/* Fill in the MAC-level header (if not already set) */
 	if (!card->mac_addr[0]) {
-		for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++)
+		for (i = 0; i < ETH_ALEN; i++)
 			dev->dev_addr[i] = 0xfc;
 		if ((in_dev = dev->ip_ptr) != NULL) {
 			struct in_ifaddr *ifa = in_dev->ifa_list;
 			if (ifa != NULL)
-				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long));
+				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
 		}
 	} else
 		memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a63161aec487..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
-		rs->seg_boundary_mask = -1;
+		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	if (!rs->bounce_pfn)
 		rs->bounce_pfn = -1;
 }
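The hysdn_net.c fix above sizes the copy by the field actually copied: ifa_local is a 32-bit IPv4 address, while sizeof(unsigned long) is 8 on 64-bit targets, so the old ETH_ALEN - sizeof(unsigned long) arithmetic underflowed and corrupted memory around the 6-byte MAC buffer. The corrected layout, restated (memset() stands in for the driver's byte-fill loop):

```c
/* ETH_ALEN == 6; ifa_local is 4 bytes, so the IPv4 address occupies
 * the last four bytes of the MAC and the first two stay 0xfc. */
memset(dev->dev_addr, 0xfc, ETH_ALEN);
memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)),
       &ifa->ifa_local, sizeof(ifa->ifa_local));
```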
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 84bdc2ee69e6..a443e136dc41 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
  *	@req: the request to prepare
  *
  *	Allocate the necessary i2o_block_request struct and connect it to
- *	the request. This is needed that we not loose the SG list later on.
+ *	the request. This is needed that we not lose the SG list later on.
  *
  *	Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
  */
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index be2b5926d26c..6e53a30bfd38 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
 /**
  *	i2o_msg_get_wait - obtain an I2O message from the IOP
  *	@c: I2O controller
- *	@msg: pointer to a I2O message pointer
  *	@wait: how long to wait until timeout
  *
  *	This function waits up to wait seconds for a message slot to be
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8c389d606c30..3ee698ad8599 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -254,7 +254,11 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
		return 1;
 
	*paddr = pte_pfn(pte) << PAGE_SHIFT;
+#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+	*pageshift = PAGE_SHIFT;
+#endif
	return 0;
 
 err:
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 76a76751da36..6659b2275c0c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -37,9 +37,9 @@
 #define	OPCODE_NORM_READ	0x03	/* Read data bytes (low frequency) */
 #define	OPCODE_FAST_READ	0x0b	/* Read data bytes (high frequency) */
 #define	OPCODE_PP		0x02	/* Page program (up to 256 bytes) */
-#define	OPCODE_BE_4K 		0x20	/* Erase 4KiB block */
+#define	OPCODE_BE_4K		0x20	/* Erase 4KiB block */
 #define	OPCODE_BE_32K		0x52	/* Erase 32KiB block */
-#define	OPCODE_BE		0xc7	/* Erase whole flash block */
+#define	OPCODE_CHIP_ERASE	0xc7	/* Erase whole flash chip */
 #define	OPCODE_SE		0xd8	/* Sector erase (usually 64KiB) */
 #define	OPCODE_RDID		0x9f	/* Read JEDEC ID */
 
@@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
  *
  * Returns 0 if successful, non-zero otherwise.
  */
-static int erase_block(struct m25p *flash)
+static int erase_chip(struct m25p *flash)
 {
	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
	      flash->spi->dev.bus_id, __func__,
@@ -181,7 +181,7 @@ static int erase_chip(struct m25p *flash)
	write_enable(flash);
 
	/* Set up command buffer. */
-	flash->command[0] = OPCODE_BE;
+	flash->command[0] = OPCODE_CHIP_ERASE;
 
	spi_write(flash->spi, flash->command, 1);
 
@@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
 
	mutex_lock(&flash->lock);
 
-	/* REVISIT in some cases we could speed up erasing large regions
-	 * by using OPCODE_SE instead of OPCODE_BE_4K
-	 */
-
-	/* now erase those sectors */
-	if (len == flash->mtd.size && erase_block(flash)) {
+	/* whole-chip erase? */
+	if (len == flash->mtd.size && erase_chip(flash)) {
		instr->state = MTD_ERASE_FAILED;
		mutex_unlock(&flash->lock);
		return -EIO;
+
+	/* REVISIT in some cases we could speed up erasing large regions
+	 * by using OPCODE_SE instead of OPCODE_BE_4K.  We may have set up
+	 * to use "small sector erase", but that's not always optimal.
+	 */
+
+	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			if (erase_sector(flash, addr)) {
@@ -574,10 +577,11 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
	for (tmp = 0, info = m25p_data;
			tmp < ARRAY_SIZE(m25p_data);
			tmp++, info++) {
-		if (info->jedec_id == jedec)
-			if (ext_jedec != 0 && info->ext_id != ext_jedec)
+		if (info->jedec_id == jedec) {
+			if (info->ext_id != 0 && info->ext_id != ext_jedec)
				continue;
			return info;
+		}
	}
	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
	return NULL;
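The erase rework above uses the single chip-erase opcode only when the request spans the whole device and otherwise walks sectors; the relocated REVISIT comment is about a third option (larger sector erase) that the driver does not take yet. Reduced to its control flow, with hypothetical erase_whole_chip()/erase_one_sector() standing in for the driver's helpers:

	/* Illustrative strategy choice only; both helpers are hypothetical. */
	static int erase_whole_chip(void);
	static int erase_one_sector(unsigned int addr);

	static int do_erase(unsigned int addr, unsigned int len,
			    unsigned int chip_size, unsigned int sector_size)
	{
		if (addr == 0 && len == chip_size)
			return erase_whole_chip();	/* one opcode, fastest */

		while (len) {			/* otherwise sector by sector */
			if (erase_one_sector(addr))
				return -1;
			addr += sector_size;
			len -= sector_size;
		}
		return 0;
	}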
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 42d844f8f6bf..dfbf3f270cea 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -19,7 +19,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/concat.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define MAX_RESOURCES		4
 
@@ -27,7 +27,6 @@ struct physmap_flash_info {
	struct mtd_info		*mtd[MAX_RESOURCES];
	struct mtd_info		*cmtd;
	struct map_info		map[MAX_RESOURCES];
-	struct resource		*res;
 #ifdef CONFIG_MTD_PARTITIONS
	int			nr_parts;
	struct mtd_partition	*parts;
@@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
 #endif
			map_destroy(info->mtd[i]);
		}
-
-		if (info->map[i].virt != NULL)
-			iounmap(info->map[i].virt);
-	}
-
-	if (info->res != NULL) {
-		release_resource(info->res);
-		kfree(info->res);
	}
-
	return 0;
 }
 
@@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
	if (physmap_data == NULL)
		return -ENODEV;
 
-	info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
+			    GFP_KERNEL);
	if (info == NULL) {
		err = -ENOMEM;
		goto err_out;
@@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
		       (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
		       (unsigned long long)dev->resource[i].start);
 
-		info->res = request_mem_region(dev->resource[i].start,
-			dev->resource[i].end - dev->resource[i].start + 1,
-			dev->dev.bus_id);
-		if (info->res == NULL) {
+		if (!devm_request_mem_region(&dev->dev,
+			dev->resource[i].start,
+			dev->resource[i].end - dev->resource[i].start + 1,
+			dev->dev.bus_id)) {
			dev_err(&dev->dev, "Could not reserve memory region\n");
			err = -ENOMEM;
			goto err_out;
@@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
		info->map[i].bankwidth = physmap_data->width;
		info->map[i].set_vpp = physmap_data->set_vpp;
 
-		info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
+		info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
+						 info->map[i].size);
		if (info->map[i].virt == NULL) {
			dev_err(&dev->dev, "Failed to ioremap flash region\n");
			err = EIO;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 024e3fffd4bb..a83192f80eba 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
	ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
 
 #ifdef CONFIG_MTD_OF_PARTS
-	if (ret == 0)
-		ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
-					      flash_np, &fun->parts);
+	if (ret == 0) {
+		ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
+		if (ret < 0)
+			goto err;
+	}
 #endif
	if (ret > 0)
		ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 75c899039023..9bd6c9ac8443 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
	}
 
	lpcctl = pci_resource_start(pdev, 0);
+	pci_dev_put(pdev);
 
	if (!request_region(lpcctl, 4, driver_name)) {
		err = -EBUSY;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c0fa9c9edf08..15f0a26730ae 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
 
 static struct pxa3xx_nand_flash stm2GbX16 = {
	.timing = &stm2GbX16_timing,
+	.cmdset = &largepage_cmdset,
	.page_per_block = 64,
	.page_size = 2048,
	.flash_width = 16,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index e39b21d3e168..a7e4d985f5ef 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -32,19 +32,18 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/mach/flash.h>
-#include <asm/arch/gpmc.h>
-#include <asm/arch/onenand.h>
-#include <asm/arch/gpio.h>
-#include <asm/arch/pm.h>
+#include <mach/gpmc.h>
+#include <mach/onenand.h>
+#include <mach/gpio.h>
+#include <mach/pm.h>
 
-#include <linux/dma-mapping.h>
-#include <asm/dma-mapping.h>
-#include <asm/arch/dma.h>
+#include <mach/dma.h>
 
-#include <asm/arch/board.h>
+#include <mach/board.h>
 
 #define DRIVER_NAME "omap2-onenand"
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d07e3f148951..a1a3d0e5d2b4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
	return 0;
 }
 
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	u32 msi_ctrl;
+
+	if (bnx2_has_work(bnapi)) {
+		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+			return;
+
+		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+		}
+	}
+
+	bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
 static void
 bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
-		if (unlikely(work_done >= budget))
-			break;
-
		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
+
+		if (unlikely(work_done >= budget))
+			break;
+
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;
 
+	bp->idle_chk_status_idx = 0xffff;
+
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
 
	/* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;
 
+	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+	    BNX2_FLAG_USING_MSI)
+		bnx2_chk_missed_msi(bp);
+
	bnx2_send_heart_beat(bp);
 
	bp->stats_blk->stat_FwRxDrop =
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d95340160..0b032c3c7b61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
  *  pci_config_l definition
  *  offset: 0000
  */
+#define BNX2_PCICFG_MSI_CONTROL				0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE			 (1L<<16)
+
 #define BNX2_PCICFG_MISC_CONFIG				0x00000068
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP	 (1L<<2)
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP	 (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
 
	u8			num_tx_rings;
	u8			num_rx_rings;
+
+	u32			idle_chk_status_idx;
+
 };
 
 #define REG_RD(bp, offset)					\
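Taken together, bnx2_chk_missed_msi(), the bnx2_poll() reordering, and the 0xffff seed in bnx2_init_chip() implement a timer-driven watchdog for lost MSIs: if NAPI work is pending but the status index has not advanced since the previous timer tick, the driver assumes the edge-triggered MSI was dropped, pulses the MSI enable bit, and calls the interrupt handler directly. Stripped of the register specifics, the shape is roughly the following; every name below is an illustrative stand-in, not the driver's API.

	/* Hypothetical missed-interrupt watchdog, run from a periodic timer.
	 * mask_irq_source()/unmask_irq_source()/run_irq_handler() stand in
	 * for the device-specific operations. */
	struct msi_watchdog {
		unsigned int idle_chk_idx;	/* index snapshot from last tick */
	};

	static void check_missed_irq(struct msi_watchdog *wd,
				     unsigned int cur_idx, bool work_pending)
	{
		if (work_pending && cur_idx == wd->idle_chk_idx) {
			/* Nothing moved for a whole tick: assume the edge was
			 * lost, re-arm the source and run the handler by hand. */
			mask_irq_source();
			unmask_irq_source();
			run_irq_handler();
		}
		wd->idle_chk_idx = cur_idx;
	}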
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbbe..c414554ac321 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
	return erxrdpt;
 }
 
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+	if (ptr + RSV_SIZE > RXEND_INIT)
+		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+	else
+		return ptr + RSV_SIZE;
+}
+
 static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
 {
	u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
		skb->dev = ndev;
		skb_reserve(skb, NET_IP_ALIGN);
		/* copy the packet from the receive buffer */
-		enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
-				len, skb_put(skb, len));
+		enc28j60_mem_read(priv,
+			rx_packet_start(priv->next_pk_ptr),
+			len, skb_put(skb, len));
		if (netif_msg_pktdata(priv))
			dump_packet(__func__, skb->len, skb->data);
		skb->protocol = eth_type_trans(skb, ndev);
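rx_packet_start() above is plain ring-buffer arithmetic: the payload begins RSV_SIZE bytes after the packet pointer, and if that lands past the end of the RX window it wraps back by the window's length. A worked, runnable example with made-up window bounds (the real RXSTART_INIT/RXEND_INIT are hardware constants in the driver):

	#include <assert.h>
	#include <stdint.h>

	/* Same computation as rx_packet_start(), assuming an RX window of
	 * [0x0000, 0x19FF] and a 6-byte receive status vector per packet. */
	#define RX_START 0x0000u
	#define RX_END   0x19FFu
	#define RSV      6u

	static uint16_t payload_start(uint16_t pkt_ptr)
	{
		uint32_t p = (uint32_t)pkt_ptr + RSV;

		if (p > RX_END)			/* ran past the window: wrap */
			p -= (RX_END - RX_START + 1);
		return (uint16_t)p;
	}

	int main(void)
	{
		assert(payload_start(0x0100) == 0x0106);	/* no wrap */
		assert(payload_start(0x19FC) == 0x0002);	/* 0x1A02 wraps */
		return 0;
	}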
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d21..b289a0a2b945 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
	priv->xmac_base = priv->xc->xmac_base;
	priv->sram_base = priv->xc->sram_base;
 
+	spin_lock_init(&priv->lock);
+
	ret = pfifo_request(PFIFO_MASK(priv->id));
	if (ret) {
		printk("unable to request PFIFO\n");
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a7..7a9f901d4ff6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
		return 0;
	ipw_send_disassociate(data, 0);
+	netif_carrier_off(priv->net_dev);
	return 1;
 }
 
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
	u16 remaining_bytes;
	int fc;
 
+	if (!(priv->status & STATUS_ASSOCIATED))
+		goto drop;
+
	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90c..01a845851338 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
	priv->num_stations = 0;
	memset(priv->stations, 0, sizeof(priv->stations));
 
+	/* clean ucode key table bit map */
+	priv->ucode_key_table = 0;
+
	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5c..26f7084d3011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
		if (!test_and_set_bit(i, &priv->ucode_key_table))
			return i;
 
-	return -1;
+	return WEP_INVALID_OFFSET;
 }
 
 int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
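The allocator patched above is the standard bitmap idiom: scan for a clear bit, claim it atomically with test_and_set_bit(), and on exhaustion return a sentinel (WEP_INVALID_OFFSET rather than -1, so callers cannot mistake the failure value for a huge valid index). A deliberately non-atomic sketch of the same idea, for illustration only:

	#include <stdint.h>

	#define NUM_SLOTS	16
	#define INVALID_SLOT	0xff	/* sentinel, like WEP_INVALID_OFFSET */

	/* Minimal sketch; in the driver, test_and_set_bit() makes the
	 * claim safe against concurrent allocators. */
	static uint8_t alloc_slot(uint32_t *bitmap)
	{
		for (uint8_t i = 0; i < NUM_SLOTS; i++) {
			if (!(*bitmap & (1u << i))) {
				*bitmap |= (1u << i);	/* claim the slot */
				return i;
			}
		}
		return INVALID_SLOT;	/* table full */
	}

	static void free_slot(uint32_t *bitmap, uint8_t i)
	{
		*bitmap &= ~(1u << i);
	}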
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */
 
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new key");
+
	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 {
	unsigned long flags;
	__le16 key_flags = 0;
+	int ret;
 
	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */
 
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new key");
+
	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
+	ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
-	return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+	return ret;
 }
 
 static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */
 
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new key");
+
	/* This copy is actually not needed: we get the key with each TX */
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
 
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
		return 0;
	}
 
+	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+		IWL_WARNING("Removing wrong key %d 0x%x\n",
+			    keyconf->keyidx, key_flags);
+		spin_unlock_irqrestore(&priv->sta_lock, flags);
+		return 0;
+	}
+
	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
		&priv->ucode_key_table))
		IWL_ERROR("index %d not used in uCode key table.\n",
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff7..cac732f4047f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
		struct ieee80211_hdr *tx_hdr;
 
		tx_hdr = (struct ieee80211_hdr *)skb->data;
-		if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
+		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
		{
			__skb_unlink(skb, q);
			tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8f63f4c6b85f..9aad608bcf3f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -16,6 +16,7 @@
 #include <linux/pm.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/jiffies.h>
 #include <linux/pci-aspm.h>
 #include "../pci.h"
 
@@ -161,11 +162,12 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
  */
 static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
 {
-	int pos, child_pos;
+	int pos, child_pos, i = 0;
	u16 reg16 = 0;
	struct pci_dev *child_dev;
	int same_clock = 1;
-
+	unsigned long start_jiffies;
+	u16 child_regs[8], parent_reg;
	/*
	 * all functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
@@ -191,16 +193,19 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
		child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
		pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
			&reg16);
+		child_regs[i] = reg16;
		if (same_clock)
			reg16 |= PCI_EXP_LNKCTL_CCC;
		else
			reg16 &= ~PCI_EXP_LNKCTL_CCC;
		pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
			reg16);
+		i++;
	}
 
	/* Configure upstream component */
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+	parent_reg = reg16;
	if (same_clock)
		reg16 |= PCI_EXP_LNKCTL_CCC;
	else
@@ -212,12 +217,30 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
 
	/* Wait for link training end */
-	while (1) {
+	/* break out after waiting for 1 second */
+	start_jiffies = jiffies;
+	while ((jiffies - start_jiffies) < HZ) {
		pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			break;
		cpu_relax();
	}
+	/* training failed -> recover */
+	if ((jiffies - start_jiffies) >= HZ) {
+		dev_printk(KERN_ERR, &pdev->dev, "ASPM: Could not configure"
+			   " common clock\n");
+		i = 0;
+		list_for_each_entry(child_dev, &pdev->subordinate->devices,
+				    bus_list) {
+			child_pos = pci_find_capability(child_dev,
+							PCI_CAP_ID_EXP);
+			pci_write_config_word(child_dev,
+					child_pos + PCI_EXP_LNKCTL,
+					child_regs[i]);
+			i++;
+		}
+		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+	}
 }
 
 /*
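The ASPM change above replaces an unbounded poll with a jiffies-bounded one and restores the saved link-control values when training never completes. Note that the unsigned subtraction (jiffies - start) < HZ is deliberately wraparound-safe. A generic sketch of the same bounded-poll-with-rollback shape, assuming a kernel context; the done()/rollback() callbacks are hypothetical:

	#include <linux/kernel.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	/* Illustrative bounded poll: give the hardware up to one second,
	 * then undo the new configuration. */
	static int poll_with_rollback(bool (*done)(void), void (*rollback)(void))
	{
		unsigned long start = jiffies;

		while ((jiffies - start) < HZ) {  /* unsigned math survives wrap */
			if (done())
				return 0;
			cpu_relax();
		}
		rollback();			/* timed out: restore saved state */
		return -ETIMEDOUT;
	}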
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 4dd1c3e157ae..5a8ccb4f604d 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -253,6 +253,7 @@ placeholder:
		 __func__, pci_domain_nr(parent), parent->number, slot_nr);
 
 out:
+	kfree(slot_name);
	up_write(&pci_bus_sem);
	return slot;
 err:
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 341d7a5b45a2..4e91419e8911 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -209,12 +209,18 @@ static int ds1672_probe(struct i2c_client *client,
	return err;
 }
 
+static struct i2c_device_id ds1672_id[] = {
+	{ "ds1672", 0 },
+	{ }
+};
+
 static struct i2c_driver ds1672_driver = {
	.driver = {
		   .name = "rtc-ds1672",
		   },
	.probe = &ds1672_probe,
	.remove = &ds1672_remove,
+	.id_table = ds1672_id,
 };
 
 static int __init ds1672_init(void)
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 80782798763f..a4f6665ab3c5 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -247,12 +247,18 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
	return 0;
 }
 
+static struct i2c_device_id max6900_id[] = {
+	{ "max6900", 0 },
+	{ }
+};
+
 static struct i2c_driver max6900_driver = {
	.driver = {
		   .name = "rtc-max6900",
		   },
	.probe = max6900_probe,
	.remove = max6900_remove,
+	.id_table = max6900_id,
 };
 
 static int __init max6900_init(void)
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
index abe87a4d2665..01d8da9afdc8 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl4030.c
@@ -337,7 +337,7 @@ static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
 }
 
 #else
-#define	omap_rtc_ioctl	NULL
+#define	twl4030_rtc_ioctl	NULL
 #endif
 
 static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9aec4ca64e56..f7da7530875e 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -107,6 +107,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
	struct request *req;
	int ret;
 
+retry:
	req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
	if (!req)
		return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -121,7 +122,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
	req->sense_len = 0;
 
-retry:
	ret = blk_execute_rq(req->q, NULL, req, 1);
	if (ret == -EIO) {
		if (req->sense_len > 0) {
@@ -136,8 +136,10 @@ retry:
		h->path_state = HP_SW_PATH_ACTIVE;
		ret = SCSI_DH_OK;
	}
-	if (ret == SCSI_DH_IMM_RETRY)
+	if (ret == SCSI_DH_IMM_RETRY) {
+		blk_put_request(req);
		goto retry;
+	}
	if (ret == SCSI_DH_DEV_OFFLINED) {
		h->path_state = HP_SW_PATH_PASSIVE;
		ret = SCSI_DH_OK;
@@ -200,6 +202,7 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
	struct request *req;
	int ret, retry;
 
+retry:
	req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
	if (!req)
		return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -216,7 +219,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
	req->sense_len = 0;
	retry = h->retries;
 
-retry:
	ret = blk_execute_rq(req->q, NULL, req, 1);
	if (ret == -EIO) {
		if (req->sense_len > 0) {
@@ -231,8 +233,10 @@ retry:
		ret = SCSI_DH_OK;
 
	if (ret == SCSI_DH_RETRY) {
-		if (--retry)
+		if (--retry) {
+			blk_put_request(req);
			goto retry;
+		}
		ret = SCSI_DH_IO;
	}
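Both scsi_dh_hp_sw hunks fix the same flaw: the retry label used to jump back past blk_get_request(), re-executing a request that had already been consumed, and the retried path never released the old one. Moving the label above the allocation and dropping the request before each retry gives the usual acquire/execute/release-per-attempt shape; a generic sketch, where alloc_req()/run_req()/put_req() are hypothetical stand-ins:

	/* Illustrative per-attempt resource discipline for a retry loop:
	 * each attempt gets a fresh request and releases it before retrying. */
	static int run_with_retries(int max_tries)
	{
		int ret;

		do {
			void *req = alloc_req();

			if (!req)
				return -ENOMEM;
			ret = run_req(req);
			put_req(req);	/* never jump back over this */
		} while (ret == -EAGAIN && --max_tries);

		return ret;
	}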
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c9e1242eaf25..5081b3981d3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -757,7 +757,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
	 * access to the device is prohibited.
	 */
	error = scsi_nonblockable_ioctl(sdp, cmd, p,
-					(mode & FMODE_NDELAY_NOW) != 0);
+					(mode & FMODE_NDELAY) != 0);
	if (!scsi_block_when_processing_errors(sdp) || !error)
		return error;
 
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 62b6633e3a97..45b66b98a516 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -521,7 +521,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
	 * if it doesn't recognise the ioctl
	 */
	ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
-					(mode & FMODE_NDELAY_NOW) != 0);
+					(mode & FMODE_NDELAY) != 0);
	if (ret != -ENODEV)
		return ret;
	return scsi_ioctl(sdev, cmd, argp);
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 6dd98f9fb89c..ae3699d77dd0 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2149,7 +2149,7 @@ out4:
	return ret;
 }
 
-static struct ioc3_submodule ioc3uart_submodule = {
+static struct ioc3_submodule ioc3uart_ops = {
	.name = "IOC3uart",
	.probe = ioc3uart_probe,
	.remove = ioc3uart_remove,
@@ -2173,7 +2173,7 @@ static int __devinit ioc3uart_init(void)
			__func__);
		return ret;
	}
-	ret = ioc3_register_submodule(&ioc3uart_submodule);
+	ret = ioc3_register_submodule(&ioc3uart_ops);
	if (ret)
		uart_unregister_driver(&ioc3_uart);
	return ret;
@@ -2181,7 +2181,7 @@ static int __devinit ioc3uart_init(void)
 
 static void __devexit ioc3uart_exit(void)
 {
-	ioc3_unregister_submodule(&ioc3uart_submodule);
+	ioc3_unregister_submodule(&ioc3uart_ops);
	uart_unregister_driver(&ioc3_uart);
 }
 
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 6117d3db0b66..28c00c3d58f5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -591,8 +591,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
	/* Update the per-port timeout */
	uart_update_timeout(port, new->c_cflag, baud);
 
-	/* Do our best to flush TX & RX, so we don't loose anything */
-	/* But we don't wait indefinitly ! */
+	/* Do our best to flush TX & RX, so we don't lose anything */
+	/* But we don't wait indefinitely ! */
	j = 5000000;	/* Maximum wait */
	/* FIXME Can't receive chars since set_termios might be called at early
	 * boot for the console, all stuff is not yet ready to receive at that
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 317d239ab740..29cbb0afef8e 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -177,5 +177,5 @@ module_exit(s3c2440_serial_exit);
 
 MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPLi v2");
+MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:s3c2440-uart");
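The s3c2440 fix matters more than a one-character typo suggests: MODULE_LICENSE() strings are matched exactly against a short list of known idents, and an unrecognized value like "GPLi v2" is treated as proprietary, taints the kernel, and hides GPL-only symbols from the module. A minimal module skeleton with a valid ident:

	#include <linux/module.h>

	/* Minimal sketch: only exact idents such as "GPL", "GPL v2",
	 * "Dual BSD/GPL", "Dual MIT/GPL" or "Proprietary" are recognized. */
	static int __init example_init(void)
	{
		return 0;
	}

	static void __exit example_exit(void)
	{
	}

	module_init(example_init);
	module_exit(example_exit);

	MODULE_LICENSE("GPL v2");
	MODULE_DESCRIPTION("License-string example");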
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 51d7bdea2869..aad1359a3eb1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1498,7 +1498,7 @@ static int ftdi_open(struct tty_struct *tty,
			priv->interface, buf, 0, WDR_TIMEOUT);
 
	/* Termios defaults are set by usb_serial_init. We don't change
-	   port->tty->termios - this would loose speed settings, etc.
+	   port->tty->termios - this would lose speed settings, etc.
	   This is same behaviour as serial.c/rs_open() - Kuba */
 
	/* ftdi_set_termios  will send usb control messages */
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index a547e5d4c8bf..a469a3d6edcb 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -5,61 +5,61 @@
  * --dte
  */
 
-#define FLUSH_CACHE_WORKAROUND	1
-
-void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries)
+static void radeon_fixup_offset(struct radeonfb_info *rinfo)
 {
-	int i;
+	u32 local_base;
+
+	/* *** Ugly workaround *** */
+	/*
+	 * On some platforms, the video memory is mapped at 0 in radeon chip space
+	 * (like PPCs) by the firmware. X will always move it up so that it's seen
+	 * by the chip to be at the same address as the PCI BAR.
+	 * That means that when switching back from X, there is a mismatch between
+	 * the offsets programmed into the engine. This means that potentially,
+	 * accel operations done before radeonfb has a chance to re-init the engine
+	 * will have incorrect offsets, and potentially trash system memory !
+	 *
+	 * The correct fix is for fbcon to never call any accel op before the engine
+	 * has properly been re-initialized (by a call to set_var), but this is a
+	 * complex fix. This workaround in the meantime, called before every accel
+	 * operation, makes sure the offsets are in sync.
+	 */
 
-	for (i=0; i<2000000; i++) {
-		rinfo->fifo_free = INREG(RBBM_STATUS) & 0x7f;
-		if (rinfo->fifo_free >= entries)
-			return;
-		udelay(10);
-	}
-	printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
-	/* XXX Todo: attempt to reset the engine */
-}
+	radeon_fifo_wait (1);
+	local_base = INREG(MC_FB_LOCATION) << 16;
+	if (local_base == rinfo->fb_local_base)
+		return;
 
-static inline void radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
-{
-	if (entries <= rinfo->fifo_free)
-		rinfo->fifo_free -= entries;
-	else
-		radeon_fifo_update_and_wait(rinfo, entries);
-}
+	rinfo->fb_local_base = local_base;
 
-static inline void radeonfb_set_creg(struct radeonfb_info *rinfo, u32 reg,
-				     u32 *cache, u32 new_val)
-{
-	if (new_val == *cache)
-		return;
-	*cache = new_val;
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(reg, new_val);
+	radeon_fifo_wait (3);
+	OUTREG(DEFAULT_PITCH_OFFSET, (rinfo->pitch << 0x16) |
+				     (rinfo->fb_local_base >> 10));
+	OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
+	OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
 }
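The long comment above explains the whole trick: cache the framebuffer base the engine was last programmed with, compare it before each accelerated operation against what MC_FB_LOCATION currently says, and reprogram the pitch/offset registers only on a mismatch. Reduced to its skeleton (register names as in the driver, everything else illustrative; read_reg()/write_reg() are hypothetical MMIO accessors):

	/* Illustrative resync-before-use pattern: detect that someone else
	 * (here: the X server) reprogrammed a shared hardware base address,
	 * and bring the engine's idea of it back in sync before drawing. */
	struct engine_state {
		u32 cached_base;	/* base the engine was last given */
	};

	static void fixup_offset(struct engine_state *e)
	{
		u32 hw_base = read_reg(MC_FB_LOCATION) << 16;

		if (hw_base == e->cached_base)
			return;		/* still in sync: cheap fast path */

		e->cached_base = hw_base;
		write_reg(DST_PITCH_OFFSET, hw_base >> 10);	/* resync */
		write_reg(SRC_PITCH_OFFSET, hw_base >> 10);
	}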
 
 static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo, 
				    const struct fb_fillrect *region)
 {
-	radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
-			  rinfo->dp_gui_mc_base | GMC_BRUSH_SOLID_COLOR | ROP3_P);
-	radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
-			  DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
-	radeonfb_set_creg(rinfo, DP_BRUSH_FRGD_CLR, &rinfo->dp_brush_fg_cache,
-			  region->color);
-
-	/* Ensure the dst cache is flushed and the engine idle before
-	 * issuing the operation.
-	 *
-	 * This works around engine lockups on some cards
-	 */
-#if FLUSH_CACHE_WORKAROUND
-	radeon_fifo_wait(rinfo, 2);
+	radeon_fifo_wait(4);
+
+	OUTREG(DP_GUI_MASTER_CNTL,
+		rinfo->dp_gui_master_cntl  /* contains, like GMC_DST_32BPP */
+		| GMC_BRUSH_SOLID_COLOR
+		| ROP3_P);
+	if (radeon_get_dstbpp(rinfo->depth) != DST_8BPP)
+		OUTREG(DP_BRUSH_FRGD_CLR, rinfo->pseudo_palette[region->color]);
+	else
+		OUTREG(DP_BRUSH_FRGD_CLR, region->color);
+	OUTREG(DP_WRITE_MSK, 0xffffffff);
+	OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
+
+	radeon_fifo_wait(2);
	OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
	OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-	radeon_fifo_wait(rinfo, 2);
+
+	radeon_fifo_wait(2);
	OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
	OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
 }
@@ -70,14 +70,15 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
	struct fb_fillrect modded;
	int vxres, vyres;
 
-	WARN_ON(rinfo->gfx_mode);
-	if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
+	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, region);
		return;
	}
 
+	radeon_fixup_offset(rinfo);
+
	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;
 
@@ -90,10 +91,6 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
	if(modded.dx + modded.width  > vxres) modded.width  = vxres - modded.dx;
	if(modded.dy + modded.height > vyres) modded.height = vyres - modded.dy;
 
-	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-	    info->fix.visual == FB_VISUAL_DIRECTCOLOR )
-		modded.color = ((u32 *) (info->pseudo_palette))[region->color];
-
	radeonfb_prim_fillrect(rinfo, &modded);
 }
 
@@ -112,22 +109,22 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
	if ( xdir < 0 ) { sx += w-1; dx += w-1; }
	if ( ydir < 0 ) { sy += h-1; dy += h-1; }
 
-	radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
-			  rinfo->dp_gui_mc_base |
-			  GMC_BRUSH_NONE |
-			  GMC_SRC_DATATYPE_COLOR |
-			  ROP3_S |
-			  DP_SRC_SOURCE_MEMORY);
-	radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
-			  (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) |
-			  (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
-
-#if FLUSH_CACHE_WORKAROUND
-	radeon_fifo_wait(rinfo, 2);
+	radeon_fifo_wait(3);
+	OUTREG(DP_GUI_MASTER_CNTL,
+		rinfo->dp_gui_master_cntl /* i.e. GMC_DST_32BPP */
+		| GMC_BRUSH_NONE
+		| GMC_SRC_DSTCOLOR
+		| ROP3_S 
+		| DP_SRC_SOURCE_MEMORY );
+	OUTREG(DP_WRITE_MSK, 0xffffffff);
+	OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
+			| (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
+
+	radeon_fifo_wait(2);
	OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
	OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-	radeon_fifo_wait(rinfo, 3);
+
+	radeon_fifo_wait(3);
	OUTREG(SRC_Y_X, (sy << 16) | sx);
	OUTREG(DST_Y_X, (dy << 16) | dx);
	OUTREG(DST_HEIGHT_WIDTH, (h << 16) | w);
@@ -146,14 +143,15 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
	modded.width  = area->width;
	modded.height = area->height;
 
-	WARN_ON(rinfo->gfx_mode);
-	if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
+	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, area);
		return;
	}
 
+	radeon_fixup_offset(rinfo);
+
	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;
 
@@ -170,115 +168,13 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
	radeonfb_prim_copyarea(rinfo, &modded);
 }
 
-static void radeonfb_prim_imageblit(struct radeonfb_info *rinfo,
-				    const struct fb_image *image,
-				    u32 fg, u32 bg)
-{
-	unsigned int dwords;
-	u32 *bits;
-
-	radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
-			  rinfo->dp_gui_mc_base |
-			  GMC_BRUSH_NONE | GMC_DST_CLIP_LEAVE |
-			  GMC_SRC_DATATYPE_MONO_FG_BG |
-			  ROP3_S |
-			  GMC_BYTE_ORDER_MSB_TO_LSB |
-			  DP_SRC_SOURCE_HOST_DATA);
-	radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
-			  DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
-	radeonfb_set_creg(rinfo, DP_SRC_FRGD_CLR, &rinfo->dp_src_fg_cache, fg);
-	radeonfb_set_creg(rinfo, DP_SRC_BKGD_CLR, &rinfo->dp_src_bg_cache, bg);
-
-	/* Ensure the dst cache is flushed and the engine idle before
-	 * issuing the operation.
-	 *
-	 * This works around engine lockups on some cards
-	 */
-#if FLUSH_CACHE_WORKAROUND
-	radeon_fifo_wait(rinfo, 2);
-	OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
-	OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-
-	/* X here pads width to a multiple of 32 and uses the clipper to
-	 * adjust the result. Is that really necessary ? Things seem to
-	 * work ok for me without that and the doco doesn't seem to imply
-	 * there is such a restriction.
-	 */
-	radeon_fifo_wait(rinfo, 4);
-	OUTREG(SC_TOP_LEFT, (image->dy << 16) | image->dx);
-	OUTREG(SC_BOTTOM_RIGHT, ((image->dy + image->height) << 16) |
-	       (image->dx + image->width));
-	OUTREG(DST_Y_X, (image->dy << 16) | image->dx);
-
-	OUTREG(DST_HEIGHT_WIDTH, (image->height << 16) | ((image->width + 31) & ~31));
-
-	dwords = (image->width + 31) >> 5;
-	dwords *= image->height;
-	bits = (u32*)(image->data);
-
-	while(dwords >= 8) {
-		radeon_fifo_wait(rinfo, 8);
-#if BITS_PER_LONG == 64
-		__raw_writeq(*((u64 *)(bits)), rinfo->mmio_base + HOST_DATA0);
-		__raw_writeq(*((u64 *)(bits+2)), rinfo->mmio_base + HOST_DATA2);
-		__raw_writeq(*((u64 *)(bits+4)), rinfo->mmio_base + HOST_DATA4);
-		__raw_writeq(*((u64 *)(bits+6)), rinfo->mmio_base + HOST_DATA6);
-		bits += 8;
-#else
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA1);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA2);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA3);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA4);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA5);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA6);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA7);
-#endif
-		dwords -= 8;
-	}
-	while(dwords--) {
-		radeon_fifo_wait(rinfo, 1);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
-	}
-}
-
 void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image)
 {
	struct radeonfb_info *rinfo = info->par;
-	u32 fg, bg;
-
-	WARN_ON(rinfo->gfx_mode);
-	if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
-		return;
 
-	if (!image->width || !image->height)
+	if (info->state != FBINFO_STATE_RUNNING)
		return;
-
-	/* We only do 1 bpp color expansion for now */
-	if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1)
-		goto fallback;
-
-	/* Fallback if running out of the screen. We may do clipping
-	 * in the future */
-	if ((image->dx + image->width) > info->var.xres_virtual ||
-	    (image->dy + image->height) > info->var.yres_virtual)
-		goto fallback;
-
-	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-		fg = ((u32*)(info->pseudo_palette))[image->fg_color];
-		bg = ((u32*)(info->pseudo_palette))[image->bg_color];
-	} else {
-		fg = image->fg_color;
-		bg = image->bg_color;
-	}
-
-	radeonfb_prim_imageblit(rinfo, image, fg, bg);
-	return;
-
- fallback:
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
	cfb_imageblit(info, image);
 }
@@ -289,8 +185,7 @@ int radeonfb_sync(struct fb_info *info)
 
	if (info->state != FBINFO_STATE_RUNNING)
		return 0;
-
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
	return 0;
 }
@@ -366,10 +261,9 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
	/* disable 3D engine */
	OUTREG(RB3D_CNTL, 0);
 
-	rinfo->fifo_free = 0;
	radeonfb_engine_reset(rinfo);
 
-	radeon_fifo_wait(rinfo, 1);
+	radeon_fifo_wait (1);
	if (IS_R300_VARIANT(rinfo)) {
		OUTREG(RB2D_DSTCACHE_MODE, INREG(RB2D_DSTCACHE_MODE) |
		       RB2D_DC_AUTOFLUSH_ENABLE |
@@ -383,7 +277,7 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
		OUTREG(RB2D_DSTCACHE_MODE, 0);
	}
 
-	radeon_fifo_wait(rinfo, 3);
+	radeon_fifo_wait (3);
	/* We re-read MC_FB_LOCATION from card as it can have been
	 * modified by XFree drivers (ouch !)
	 */
@@ -394,57 +288,41 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
	OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
	OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
 
-	radeon_fifo_wait(rinfo, 1);
-#ifdef __BIG_ENDIAN
+	radeon_fifo_wait (1);
+#if defined(__BIG_ENDIAN)
	OUTREGP(DP_DATATYPE, HOST_BIG_ENDIAN_EN, ~HOST_BIG_ENDIAN_EN);
 #else
	OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN);
 #endif
 
-	radeon_fifo_wait(rinfo, 2);
+	radeon_fifo_wait (2);
	OUTREG(DEFAULT_SC_TOP_LEFT, 0);
	OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX |
					 DEFAULT_SC_BOTTOM_MAX));
 
-	/* set default DP_GUI_MASTER_CNTL */
	temp = radeon_get_dstbpp(rinfo->depth);
-	rinfo->dp_gui_mc_base = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
+	rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
 
-	rinfo->dp_gui_mc_cache = rinfo->dp_gui_mc_base |
-		GMC_BRUSH_SOLID_COLOR |
-		GMC_SRC_DATATYPE_COLOR;
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(DP_GUI_MASTER_CNTL, rinfo->dp_gui_mc_cache);
+	radeon_fifo_wait (1);
+	OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl |
+				    GMC_BRUSH_SOLID_COLOR |
+				    GMC_SRC_DATATYPE_COLOR));
 
+	radeon_fifo_wait (7);
 
	/* clear line drawing regs */
-	radeon_fifo_wait(rinfo, 2);
	OUTREG(DST_LINE_START, 0);
	OUTREG(DST_LINE_END, 0);
 
-	/* set brush and source color regs */
-	rinfo->dp_brush_fg_cache = 0xffffffff;
-	rinfo->dp_brush_bg_cache = 0x00000000;
-	rinfo->dp_src_fg_cache = 0xffffffff;
-	rinfo->dp_src_bg_cache = 0x00000000;
-	radeon_fifo_wait(rinfo, 4);
-	OUTREG(DP_BRUSH_FRGD_CLR, rinfo->dp_brush_fg_cache);
-	OUTREG(DP_BRUSH_BKGD_CLR, rinfo->dp_brush_bg_cache);
-	OUTREG(DP_SRC_FRGD_CLR, rinfo->dp_src_fg_cache);
-	OUTREG(DP_SRC_BKGD_CLR, rinfo->dp_src_bg_cache);
-
-	/* Default direction */
-	rinfo->dp_cntl_cache = DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM;
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(DP_CNTL, rinfo->dp_cntl_cache);
+	/* set brush color regs */
+	OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff);
+	OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000);
+
+	/* set source color regs */
+	OUTREG(DP_SRC_FRGD_CLR, 0xffffffff);
+	OUTREG(DP_SRC_BKGD_CLR, 0x00000000);
 
	/* default write mask */
-	radeon_fifo_wait(rinfo, 1);
	OUTREG(DP_WRITE_MSK, 0xffffffff);
 
-	/* Default to no swapping of host data */
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(RBBM_GUICNTL, RBBM_GUICNTL_HOST_DATA_SWAP_NONE);
-
-	/* Make sure it's settled */
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle ();
 }
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index f343ba83f0ae..1a056adb61c8 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -66,7 +66,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
	level = bd->props.brightness;
 
	del_timer_sync(&rinfo->lvds_timer);
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
	lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
	if (level > 0) {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index b3ffe8205d2b..d0f1a7fc2c9d 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -852,6 +852,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
	if (rinfo->asleep)
		return 0;
 
+	radeon_fifo_wait(2);
	OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
			     * var->bits_per_pixel / 8) & ~7);
	return 0;
@@ -881,6 +882,7 @@ static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
		if (rc)
			return rc;
 
+		radeon_fifo_wait(2);
		if (value & 0x01) {
			tmp = INREG(LVDS_GEN_CNTL);
 
@@ -938,7 +940,7 @@ int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch)
	if (rinfo->lock_blank)
		return 0;
 
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
	val = INREG(CRTC_EXT_CNTL);
	val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
@@ -1046,7 +1048,7 @@ static int radeonfb_blank (int blank, struct fb_info *info)
 
	if (rinfo->asleep)
		return 0;
-	
+
	return radeon_screen_blank(rinfo, blank, 0);
 }
 
@@ -1072,6 +1074,8 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
	pindex = regno;
 
	if (!rinfo->asleep) {
+		radeon_fifo_wait(9);
+
		if (rinfo->bpp == 16) {
			pindex = regno * 8;
 
@@ -1240,6 +1244,8 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
 {
	int i;
 
+	radeon_fifo_wait(20);
+
	/* Workaround from XFree */
	if (rinfo->is_mobility) {
	        /* A temporary workaround for the occasional blanking on certain laptop
@@ -1335,7 +1341,7 @@ static void radeon_lvds_timer_func(unsigned long data)
 {
	struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
 
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
	OUTREG(LVDS_GEN_CNTL, rinfo->pending_lvds_gen_cntl);
 }
@@ -1353,11 +1359,10 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
	if (nomodeset)
		return;
 
-	radeon_engine_idle(rinfo);
-
	if (!regs_only)
		radeon_screen_blank(rinfo, FB_BLANK_NORMAL, 0);
 
+	radeon_fifo_wait(31);
	for (i=0; i<10; i++)
		OUTREG(common_regs[i].reg, common_regs[i].val);
 
@@ -1385,6 +1390,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
	radeon_write_pll_regs(rinfo, mode);
 
	if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
+		radeon_fifo_wait(10);
		OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
		OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
		OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
@@ -1399,6 +1405,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
	if (!regs_only)
		radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 0);
 
+	radeon_fifo_wait(2);
	OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
 
	return;
@@ -1549,7 +1556,7 @@ static int radeonfb_set_par(struct fb_info *info)
	/* We always want engine to be idle on a mode switch, even
	 * if we won't actually change the mode
	 */
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
	hSyncStart = mode->xres + mode->right_margin;
	hSyncEnd = hSyncStart + mode->hsync_len;
@@ -1844,6 +1851,7 @@ static int radeonfb_set_par(struct fb_info *info)
	return 0;
 }
 
+
 static struct fb_ops radeonfb_ops = {
	.owner			= THIS_MODULE,
	.fb_check_var		= radeonfb_check_var,
@@ -1867,7 +1875,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
	info->par = rinfo;
	info->pseudo_palette = rinfo->pseudo_palette;
	info->flags = FBINFO_DEFAULT
-		    | FBINFO_HWACCEL_IMAGEBLIT
		    | FBINFO_HWACCEL_COPYAREA
		    | FBINFO_HWACCEL_FILLRECT
		    | FBINFO_HWACCEL_XPAN
@@ -1875,7 +1882,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
	info->fbops = &radeonfb_ops;
	info->screen_base = rinfo->fb_base;
	info->screen_size = rinfo->mapped_vram;
-
	/* Fill fix common fields */
	strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
	info->fix.smem_start = rinfo->fb_base_phys;
@@ -1890,25 +1896,8 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
	info->fix.mmio_len = RADEON_REGSIZE;
	info->fix.accel = FB_ACCEL_ATI_RADEON;
 
-	/* Allocate colormap */
	fb_alloc_cmap(&info->cmap, 256, 0);
 
-	/* Setup pixmap used for acceleration */
-#define PIXMAP_SIZE	(2048 * 4)
-
-	info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL);
-	if (!info->pixmap.addr) {
-		printk(KERN_ERR "radeonfb: Failed to allocate pixmap !\n");
-		noaccel = 1;
-		goto bail;
-	}
-	info->pixmap.size = PIXMAP_SIZE;
-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
-	info->pixmap.scan_align = 4;
-	info->pixmap.buf_align = 4;
-	info->pixmap.access_align = 32;
-
-bail:
	if (noaccel)
		info->flags |= FBINFO_HWACCEL_DISABLED;
 
@@ -2017,6 +2006,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
		u32 tom = INREG(NB_TOM);
 
		tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+		radeon_fifo_wait(6);
		OUTREG(MC_FB_LOCATION, tom);
		OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
		OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 3df5015f1d13..675abdafc2d8 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2653,9 +2653,9 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 
	if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
		/* Make sure engine is reset */
-		radeon_engine_idle(rinfo);
+		radeon_engine_idle();
		radeonfb_engine_reset(rinfo);
-		radeon_engine_idle(rinfo);
+		radeon_engine_idle();
	}
 
	/* Blank display and LCD */
@@ -2767,7 +2767,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
		rinfo->asleep = 0;
	} else
-		radeon_engine_idle(rinfo);
+		radeon_engine_idle();
 
	/* Restore display & engine */
	radeon_write_mode (rinfo, &rinfo->state, 1);
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index ea0b5b47acaf..3ea1b00fdd22 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -336,15 +336,7 @@ struct radeonfb_info {
	int			mon2_type;
	u8			*mon2_EDID;
 
-	/* accel bits */
-	u32			dp_gui_mc_base;
-	u32			dp_gui_mc_cache;
-	u32			dp_cntl_cache;
-	u32			dp_brush_fg_cache;
-	u32			dp_brush_bg_cache;
-	u32			dp_src_fg_cache;
-	u32			dp_src_bg_cache;
-	u32			fifo_free;
+	u32			dp_gui_master_cntl;
 
	struct pll_info		pll;
 
@@ -356,7 +348,6 @@ struct radeonfb_info {
	int			lock_blank;
	int			dynclk;
	int			no_schedule;
-	int			gfx_mode;
	enum radeon_pm_mode	pm_mode;
	reinit_function_ptr     reinit_func;
 
@@ -401,14 +392,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
 #define OUTREG8(addr,val)	writeb(val, (rinfo->mmio_base)+addr)
 #define INREG16(addr)		readw((rinfo->mmio_base)+addr)
 #define OUTREG16(addr,val)	writew(val, (rinfo->mmio_base)+addr)
-
-#ifdef CONFIG_PPC
-#define INREG(addr)		({ eieio(); ld_le32(rinfo->mmio_base+(addr)); })
-#define OUTREG(addr,val)	do { eieio(); st_le32(rinfo->mmio_base+(addr),(val)); } while(0)
-#else
 #define INREG(addr)		readl((rinfo->mmio_base)+addr)
 #define OUTREG(addr,val)	writel(val, (rinfo->mmio_base)+addr)
-#endif
 
 static inline void _OUTREGP(struct radeonfb_info *rinfo, u32 addr,
		       u32 val, u32 mask)
@@ -550,7 +535,17 @@ static inline u32 radeon_get_dstbpp(u16 depth)
  * 2D Engine helper routines
  */
 
-extern void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries);
+static inline void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
+{
+	int i;
+
+	for (i=0; i<2000000; i++) {
+		if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
+			return;
+		udelay(1);
+	}
+	printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
+}
 
 static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
 {
@@ -563,7 +558,7 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
	/* Ensure FIFO is empty, ie, make sure the flush commands
	 * has reached the cache
	 */
-	radeon_fifo_update_and_wait(rinfo, 64);
+	_radeon_fifo_wait (rinfo, 64);
 
	/* Wait for the flush to complete */
	for (i=0; i < 2000000; i++) {
@@ -575,12 +570,12 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
 }
 
 
-static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
+static inline void _radeon_engine_idle(struct radeonfb_info *rinfo)
 {
	int i;
 
	/* ensure FIFO is empty before waiting for idle */
-	radeon_fifo_update_and_wait (rinfo, 64);
+	_radeon_fifo_wait (rinfo, 64);
 
	for (i=0; i<2000000; i++) {
		if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
@@ -593,6 +588,8 @@ static inline void _radeon_engine_idle(struct radeonfb_info *rinfo)
 }
 
 
+#define radeon_engine_idle()		_radeon_engine_idle(rinfo)
+#define radeon_fifo_wait(entries)	_radeon_fifo_wait(rinfo,entries)
 
 #define radeon_msleep(ms)		_radeon_msleep(rinfo,ms)
 
@@ -622,7 +619,6 @@ extern void radeonfb_imageblit(struct fb_info *p,
			       const struct fb_image *image);
 extern int radeonfb_sync(struct fb_info *info);
 extern void radeonfb_engine_init (struct radeonfb_info *rinfo);
 extern void radeonfb_engine_reset(struct radeonfb_info *rinfo);
-extern void radeon_fixup_mem_offset(struct radeonfb_info *rinfo);
 
 /* Other functions */
 extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch);
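The _radeon_fifo_wait() helper reintroduced above is the canonical bounded MMIO poll: spin on a status register, back off briefly between reads, and give up loudly after a fixed number of iterations instead of hanging the console. In isolation the pattern looks like this; read_status() is a hypothetical accessor, and the iteration count mirrors the driver's:

	/* Illustrative bounded register poll. FIFO_MASK extracts the
	 * free-entry count as in RBBM_STATUS. */
	#define FIFO_MASK	0x7f
	#define MAX_POLLS	2000000

	static int fifo_wait(int entries)
	{
		int i;

		for (i = 0; i < MAX_POLLS; i++) {
			if ((read_status() & FIFO_MASK) >= entries)
				return 0;	/* enough command slots free */
			udelay(1);		/* brief back-off between reads */
		}
		return -ETIMEDOUT;		/* never hang: report and bail */
	}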
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 67ff370d80af..0b2adefe9e3d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3531,12 +3531,18 @@ static void fbcon_exit(void)
	softback_buf = 0UL;
 
	for (i = 0; i < FB_MAX; i++) {
+		int pending;
+
		mapped = 0;
		info = registered_fb[i];
 
		if (info == NULL)
			continue;
 
+		pending = cancel_work_sync(&info->queue);
+		DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
+			"no"));
+
		for (j = first_fb_vc; j <= last_fb_vc; j++) {
			if (con2fb_map[j] == i)
				mapped = 1;
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 38718d95fbb9..fb64234a3825 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -927,9 +927,9 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
	}
 
	dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
-		(u64)par->fb_base_phys, (ulong)par->mapped_vram);
+		(unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram);
	dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
-		(u64)par->mmio_base_phys, (ulong)par->mmio_len);
+		(unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
 
	if (mb862xx_pci_gdc_init(par))
		goto io_unmap;
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 5a5e407dc45f..1a49519dafa4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -392,7 +392,7 @@ static void set_fb_fix(struct fb_info *fbi)
	int bpp;
 
	rg = &plane->fbdev->mem_desc.region[plane->idx];
-	fbi->screen_base = (char __iomem *)rg->vaddr;
+	fbi->screen_base = rg->vaddr;
	fix->smem_start = rg->paddr;
	fix->smem_len = rg->size;
 
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 26173a270e94..5b395a4ddfdf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -392,7 +392,7 @@ static int iTCO_wdt_stop(void)
 
	/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
	val32 = inl(SMI_EN);
-	val32 &= 0x00002000;
+	val32 |= 0x00002000;
	outl(val32, SMI_EN);
 
	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
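The one-character iTCO_wdt fix is a classic read-modify-write mistake: val32 &= 0x00002000 clears every SMI_EN bit except TCO_EN instead of setting TCO_EN, which is what the stop path needs to re-enable TCO SMIs. A small, runnable illustration of the difference:

	#include <assert.h>
	#include <stdint.h>

	#define TCO_EN (1u << 13)	/* bit 13 of SMI_EN, as in the driver */

	int main(void)
	{
		uint32_t smi_en = 0x00000042;	/* other enables set, TCO_EN clear */

		/* Buggy: '&=' keeps only TCO_EN (here: nothing), wiping the rest. */
		uint32_t buggy = smi_en & 0x00002000;
		assert(buggy == 0x00000000);

		/* Fixed: '|=' sets TCO_EN and leaves the other bits alone. */
		uint32_t fixed = smi_en | 0x00002000;
		assert(fixed == (0x00000042 | TCO_EN));

		return 0;
	}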