Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/battery.c                 |   9
-rw-r--r--  drivers/block/pktcdvd.c                |   4
-rw-r--r--  drivers/char/serial167.c               |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |  15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        | 637
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c   |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c |   7
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h        |   1
-rw-r--r--  drivers/ide/Kconfig                    |  14
-rw-r--r--  drivers/ide/ide-io.c                   |   6
-rw-r--r--  drivers/ide/pmac.c                     |  30
-rw-r--r--  drivers/md/dm-table.c                  |   2
-rw-r--r--  drivers/message/i2o/i2o_block.c        |   2
-rw-r--r--  drivers/message/i2o/iop.c              |   1
-rw-r--r--  drivers/scsi/sd.c                      |   2
-rw-r--r--  drivers/scsi/sr.c                      |   2
-rw-r--r--  drivers/serial/mpc52xx_uart.c          |   4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c          |   2
-rw-r--r--  drivers/watchdog/iTCO_wdt.c            |   2
19 files changed, 441 insertions(+), 306 deletions(-)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a0a178dd189c..1423b0c0cd2e 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = battery->current_now * 1000;
- /* if power units are mW, convert to mA by
- dividing by current voltage (mV/1000) */
- if (!battery->power_unit) {
- if (battery->voltage_now) {
- val->intval /= battery->voltage_now;
- val->intval *= 1000;
- } else
- val->intval = -1;
- }
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
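
For reference, the arithmetic the deleted battery.c block performed can be sketched as below (editorial; the numeric values are illustrative, not from the patch). With ACPI reporting power in mW and voltage in mV, current in uA follows from I = P / V:

	int power_mw = 5000;        /* battery->current_now, mW units */
	int voltage_mv = 11100;     /* battery->voltage_now           */
	int val = power_mw * 1000;  /* scale up first                 */
	val /= voltage_mv;          /* uW-scale / mV = mA-scale       */
	val *= 1000;                /* mA -> uA for the class API     */
	/* val is now ~450000 uA; voltage_now == 0 fell back to -1
	 * in the removed code */
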
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f20bf359b84f..edda7b6b077b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
return 0;
out_mem:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return ret;
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
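
Editorial note on the pktcdvd hunks: blkdev_put() in this era must be passed the same fmode flags that were used to open the device. A minimal sketch of the pairing this change restores, assuming (as the fix suggests) that pkt_new_dev() opens the CD device with FMODE_READ | FMODE_NDELAY:

	struct block_device *bdev;

	/* open path (assumed to use these flags) */
	bdev = open_by_devnum(dev, FMODE_READ | FMODE_NDELAY);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... use the device ... */
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); /* must mirror the get */
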
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3b23270eaa65..a8f15e6be594 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
TTY_OVERRUN);
/*
If the flip buffer itself is
- overflowing, we still loose
+ overflowing, we still lose
the next incoming character.
*/
if (tty_buffer_request_room(tty, 1) !=
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0ec..adc972cc6bfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
* List of objects currently involved in rendering from the
* ringbuffer.
*
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
* still have a write_domain which needs to be flushed before
* unbinding.
*
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
@@ -371,8 +379,8 @@ struct drm_i915_gem_object {
uint32_t agp_type;
/**
- * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
- * GEM_DOMAIN_CPU is not in the object's read domain.
+ * If present, while GEM_DOMAIN_CPU is in the read domain this array
+ * flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
};
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- /** Cache domains that were flushed at the start of the request. */
- uint32_t flush_domains;
-
struct list_head list;
};
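
The comments added to i915_drv.h pin down an invariant: last_rendering_seqno is nonzero only for objects on the active list. A minimal editorial model of the transitions (the names mirror the patch, but this is a sketch, not driver code):

	#include <stdint.h>

	enum obj_list { ACTIVE, FLUSHING, INACTIVE };

	struct gem_obj_model {
		enum obj_list list;
		uint32_t last_rendering_seqno; /* nonzero only when ACTIVE */
		int refheld;                   /* held on ACTIVE and FLUSHING */
	};

	static void move_to_active(struct gem_obj_model *o, uint32_t seqno)
	{
		o->list = ACTIVE;
		o->last_rendering_seqno = seqno;
		o->refheld = 1;
	}

	static void move_to_flushing(struct gem_obj_model *o)
	{
		o->list = FLUSHING;
		o->last_rendering_seqno = 0; /* flush not yet tied to a request */
	}

	static void move_to_inactive(struct gem_obj_model *o)
	{
		o->list = INACTIVE;
		o->last_rendering_seqno = 0;
		o->refheld = 0;              /* GTT binding alone keeps no ref */
	}
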
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..3fde82be014f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
- I915_GEM_DOMAIN_CPU, 0);
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
goto fail;
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
+ uint32_t read_domains = args->read_domains;
+ uint32_t write_domain = args->write_domain;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ /* Only handle setting domains to types used by the CPU. */
+ if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ return -EINVAL;
+
+ if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ return -EINVAL;
+
+ /* Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain != 0 && read_domains != write_domain)
+ return -EINVAL;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
- obj, obj->size, args->read_domains, args->write_domain);
+ obj, obj->size, read_domains, write_domain);
#endif
- ret = i915_gem_set_domain(obj, file_priv,
- args->read_domains, args->write_domain);
+ if (read_domains & I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+ /* Silently promote "you're not bound, there was nothing to do"
+ * to success, since the client was just asking us to
+ * make sure everything was done.
+ */
+ if (ret == -EINVAL)
+ ret = 0;
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ }
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
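
A hedged userspace sketch of the contract the reworked ioctl now enforces: only the CPU and GTT domains are accepted, and a nonzero write_domain must equal read_domains (UAPI names as in the i915 headers of this period):

	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		.write_domain = I915_GEM_DOMAIN_GTT, /* write implies the read */
	};
	ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
	/* read_domains = CPU | GTT with write_domain = GTT now fails with
	 * -EINVAL, as does any GPU domain in either field. */
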
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
/* Pinned buffers may be scanout, so flush the cache */
- if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- }
+ if (obj_priv->pin_count)
+ i915_gem_object_flush_cpu_write_domain(obj);
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
}
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
+ obj_priv->last_rendering_seqno = seqno;
}
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ BUG_ON(!obj_priv->active);
+ list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+ obj_priv->last_rendering_seqno = 0;
+}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->seqno = seqno;
request->emitted_jiffies = jiffies;
- request->flush_domains = flush_domains;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
+ /* Associate any objects on the flushing list matching the write
+ * domain we're flushing with our flush.
+ */
+ if (flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+ }
+ }
+
+ }
+
if (was_empty && !dev_priv->mm.suspended)
schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
__func__, request->seqno, obj);
#endif
- if (obj->write_domain != 0) {
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.flushing_list);
- } else {
+ if (obj->write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
i915_gem_object_move_to_inactive(obj);
- }
- }
-
- if (request->flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- /* Clear the write domain and activity from any buffers
- * that are just waiting for a flush matching the one retired.
- */
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if (obj->write_domain & request->flush_domains) {
- obj->write_domain = 0;
- i915_gem_object_move_to_inactive(obj);
- }
- }
-
}
}
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
- /* If there are writes queued to the buffer, flush and
- * create a new seqno to wait for.
+ /* This function only exists to support waiting for existing rendering,
+ * not for emitting required flushes.
*/
- if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
- uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
- DRM_INFO("%s: flushing object %p from write domain %08x\n",
- __func__, obj, write_domain);
-#endif
- i915_gem_flush(dev, 0, write_domain);
-
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = i915_add_request(dev,
- write_domain);
- BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
- DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
- }
+ BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
- /* Wait for any rendering to complete
- */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret) {
- DRM_ERROR("wait_rendering failed: %d\n", ret);
- return ret;
- }
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
- ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
- I915_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
- DRM_ERROR("set_domain failed: %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("set_domain failed: %d\n", ret);
return ret;
}
@@ -1083,6 +1095,19 @@ i915_gem_evict_something(struct drm_device *dev)
}
static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ int ret;
+
+ for (;;) {
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ break;
+ }
+ return ret;
+}
+
+static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1168,7 +1193,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev);
if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
goto search_free;
@@ -1228,6 +1254,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t seqno;
+
+ if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
+
+ /* Queue the GPU write cache flushing we need. */
+ i915_gem_flush(dev, 0, obj->write_domain);
+ seqno = i915_add_request(dev, obj->write_domain);
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+ if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it immediately go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ */
+ obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ /* If we're writing through the GTT domain, then CPU and GPU caches
+ * will need to be invalidated at next use.
+ */
+ if (write)
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we have a partially-valid cache of the object in the CPU,
+ * finish invalidating it and free the per-page flags.
+ */
+ i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
@@ -1339,16 +1502,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
- int ret;
+
+ BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+ BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1550,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
#endif
- /*
- * If we're invaliding the CPU cache and flushing a GPU cache,
- * then pause for rendering so that the GPU caches will be
- * flushed before the cpu cache is invalidated
- */
- if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
- (flush_domains & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT))) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
i915_gem_clflush_object(obj);
}
if ((write_domain | flush_domains) != 0)
obj->write_domain = write_domain;
-
- /* If we're invalidating the CPU domain, clear the per-page CPU
- * domain list as well.
- */
- if (obj_priv->page_cpu_valid != NULL &&
- (write_domain != 0 ||
- read_domains & I915_GEM_DOMAIN_CPU)) {
- drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
- DRM_MEM_DRIVER);
- obj_priv->page_cpu_valid = NULL;
- }
obj->read_domains = read_domains;
dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1565,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
- return 0;
}
/**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
*
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret, i;
- if (obj->read_domains & I915_GEM_DOMAIN_CPU)
- return 0;
+ if (!obj_priv->page_cpu_valid)
+ return;
- if (read_domains != I915_GEM_DOMAIN_CPU ||
- write_domain != 0)
- return i915_gem_object_set_domain(obj,
- read_domains, write_domain);
+ /* If we're partially in the CPU read domain, finish moving it in.
+ */
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ int i;
- /* Wait on any GPU rendering to the object to be flushed. */
+ for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+ drm_clflush_pages(obj_priv->page_list + i, 1);
+ }
+ drm_agp_chipset_flush(dev);
+ }
+
+ /* Free the page_cpu_valid mappings which are now stale, whether
+ * or not we've got I915_GEM_DOMAIN_CPU.
+ */
+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid. The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+
+ if (offset == 0 && size == obj->size)
+ return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
- if (ret)
+ if (ret != 0)
return ret;
+ i915_gem_object_flush_gtt_write_domain(obj);
+ /* If we're already fully in the CPU read domain, we're done. */
+ if (obj_priv->page_cpu_valid == NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ return 0;
+
+ /* Otherwise, create/clear the per-page CPU read domain flag if we're
+ * newly adding I915_GEM_DOMAIN_CPU
+ */
if (obj_priv->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
- }
+ if (obj_priv->page_cpu_valid == NULL)
+ return -ENOMEM;
+ } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+ i++) {
if (obj_priv->page_cpu_valid[i])
continue;
@@ -1472,39 +1661,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
obj_priv->page_cpu_valid[i] = 1;
}
- return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
- uint32_t flush_domains = dev->flush_domains;
-
- /*
- * Now that all the buffers are synced to the proper domains,
- * flush and invalidate the collected domains
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
*/
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- }
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- return flush_domains;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+ return 0;
}
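
The page-range loop above is plain integer arithmetic; a worked example (editorial, assuming PAGE_SIZE == 4096): offset = 5000 and size = 9000 cover bytes 5000..13999, i.e. pages 1 through 3:

	uint64_t first = offset / PAGE_SIZE;             /*  5000 / 4096 = 1 */
	uint64_t last = (offset + size - 1) / PAGE_SIZE; /* 13999 / 4096 = 3 */
	/* pages 1, 2 and 3 get clflushed and marked in page_cpu_valid[] */
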
/**
@@ -1585,6 +1749,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
+ if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.read_domains,
+ reloc.write_domain);
+ return -EINVAL;
+ }
+
if (reloc.write_domain && target_obj->pending_write_domain &&
reloc.write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1801,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
- /* Now that we're going to actually write some data in,
- * make sure that any rendering using this buffer's contents
- * is completed.
- */
- i915_gem_object_wait_rendering(obj);
-
- /* As we're writing through the gtt, flush
- * any CPU writes before we write the relocations
- */
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
}
/* Map the page containing the relocation we're going to
@@ -1779,6 +1947,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
+ int pin_tries;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1996,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EBUSY;
}
- /* Zero the gloabl flush/invalidate flags. These
- * will be modified as each object is bound to the
- * gtt
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- /* Look up object handles and perform the relocations */
+ /* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@@ -1844,17 +2006,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = -EBADF;
goto err;
}
+ }
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i]);
- if (ret) {
- DRM_ERROR("object bind and relocate failed %d\n", ret);
+ /* Pin and relocate */
+ for (pin_tries = 0; ; pin_tries++) {
+ ret = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret)
+ break;
+ pinned = i + 1;
+ }
+ /* success */
+ if (ret == 0)
+ break;
+
+ /* error other than GTT full, or we've already tried again */
+ if (ret != -ENOMEM || pin_tries >= 1) {
+ DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
}
- pinned = i + 1;
+
+ /* unpin all of our buffers */
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ /* evict everyone we can from the aperture */
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2048,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- /* make sure all previous memory operations have passed */
- ret = i915_gem_object_set_domain(obj,
- obj->pending_read_domains,
- obj->pending_write_domain);
- if (ret)
- goto err;
+ /* Compute new gpu domains and update invalidate/flush */
+ i915_gem_object_set_to_gpu_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
- /* Flush/invalidate caches and chipset buffer */
- flush_domains = i915_gem_dev_set_domain(dev);
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ if (dev->flush_domains)
+ (void)i915_add_request(dev, dev->flush_domains);
+ }
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -1898,8 +2098,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
- (void)i915_add_request(dev, flush_domains);
-
/* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
if (ret) {
@@ -1927,10 +2125,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = seqno;
+ i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@@ -2061,11 +2257,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
+ i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2359,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_device *dev = obj->dev;
- int ret;
- uint32_t flush_domains;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
- if (ret)
- return ret;
- flush_domains = i915_gem_dev_set_domain(obj->dev);
-
- if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
- (void) i915_add_request(dev, flush_domains);
-
- return 0;
-}
-
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
- DRM_PROC_PRINT(" %d @ %d %08x\n",
+ DRM_PROC_PRINT(" %d @ %d\n",
gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies),
- gem_request->flush_domains);
+ (int) (jiffies - gem_request->emitted_jiffies));
}
if (len > request + offset)
return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_I965GM(dev) || IS_GM45(dev)) {
- /* GM965 only does bit 11-based channel
- * randomization
+ } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+ (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* GM965/GM45 does either bit 11 or bit 17
+ * swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
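
What bit-6 swizzling means for software that touches tiled buffers through the CPU can be sketched as follows (editorial; the XOR pattern for the 9_10_11 mode is the commonly documented one, stated here as an assumption rather than taken from this patch):

	/* For I915_BIT_6_SWIZZLE_9_10_11, bit 6 of a byte address is
	 * XORed with address bits 9, 10 and 11. */
	static unsigned long swizzle_addr_9_10_11(unsigned long addr)
	{
		unsigned long bit =
			((addr >> 9) ^ (addr >> 10) ^ (addr >> 11)) & 1;
		return addr ^ (bit << 6);
	}

The hunk narrows the GM965/GM45 case: when DCC_CHANNEL_XOR_BIT_17 is set the hardware swizzles on bit 17 instead, so the bit-9/10/11 reporting above no longer applies.
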
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e6..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 6d7401772a8f..e6857e01d1ba 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
endif
+# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
config BLK_DEV_IDE_PMAC
tristate "PowerMac on-board IDE support"
depends on PPC_PMAC && IDE=y
select IDE_TIMINGS
+ select BLK_DEV_IDEDMA_PCI
help
This driver provides support for the on-board IDE controller on
most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
CD-ROM on hda. This option changes this to more natural hda for
hard disk and hdc for CD-ROM.
-config BLK_DEV_IDEDMA_PMAC
- bool "PowerMac IDE DMA support"
- depends on BLK_DEV_IDE_PMAC
- select BLK_DEV_IDEDMA_PCI
- help
- This option allows the driver for the on-board IDE controller on
- Power Macintoshes and PowerBooks to use DMA (direct memory access)
- to transfer data to and from memory. Saying Y is safe and improves
- performance.
-
config BLK_DEV_IDE_AU1XXX
bool "IDE for AMD Alchemy Au1200"
depends on SOC_AU1200
@@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
endif
config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
+ def_bool BLK_DEV_IDEDMA_SFF || \
BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
endif # IDE
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7d275b2af3eb..cc35d6dbd410 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -208,8 +208,10 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
*/
if (drive->hwif->dma_ops == NULL)
break;
- if (drive->dev_flags & IDE_DFLAG_USING_DMA)
- ide_set_dma(drive);
+ /*
+ * TODO: respect IDE_DFLAG_USING_DMA
+ */
+ ide_set_dma(drive);
break;
}
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 2e19d6298536..7c481bb56fab 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
struct macio_dev *mdev;
u32 timings[4];
volatile u32 __iomem * *kauai_fcr;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/* Those fields are duplicating what is in hwif. We currently
* can't use the hwif ones because of some assumptions that are
* beeing done by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
*/
volatile struct dbdma_regs __iomem * dma_regs;
struct dbdma_cmd* dma_table_cpu;
-#endif
-
} pmac_ide_hwif_t;
enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
#define KAUAI_FCR_UATA_RESET_N 0x00000002
#define KAUAI_FCR_UATA_ENABLE 0x00000001
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
/* Rounded Multiword DMA timings
*
* I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
static void pmac_ide_selectproc(ide_drive_t *drive);
static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-
#define PMAC_IDE_REG(x) \
((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
pmac_ide_do_update_timings(drive);
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
/*
* Calculate KeyLargo ATA/66 UDMA timings
*/
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
drive->name, speed & 0xf, *timings);
#endif
}
-#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
tl[0] = *timings;
tl[1] = *timings2;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
if (speed >= XFER_UDMA_0) {
if (pmif->kind == controller_kl_ata4)
ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
ret = -1;
} else
set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
if (ret)
return;
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
.chipset = ide_pmac,
.tp_ops = &pmac_tp_ops,
.port_ops = &pmac_ide_port_ops,
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
.dma_ops = &pmac_dma_ops,
-#endif
.host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif->regbase = regbase;
pmif->irq = irq;
pmif->kauai_fcr = NULL;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+
if (macio_resource_count(mdev) >= 2) {
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
pmif->dma_regs = NULL;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
dev_set_drvdata(&mdev->ofdev.dev, pmif);
memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
base = ioremap(rbase, rlen);
pmif->regbase = (unsigned long) base + 0x2000;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
pmif->dma_regs = base + 0x1000;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
pmif->kauai_fcr = base;
pmif->irq = pdev->irq;
@@ -1434,8 +1419,6 @@ out:
return error;
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
/*
* pmac_ide_build_dmatable builds the DBDMA command list
* for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
return 0;
}
-#else
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EOPNOTSUPP;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
module_init(pmac_ide_probe);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a63161aec487..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
if (!rs->max_segment_size)
rs->max_segment_size = MAX_SEGMENT_SIZE;
if (!rs->seg_boundary_mask)
- rs->seg_boundary_mask = -1;
+ rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
if (!rs->bounce_pfn)
rs->bounce_pfn = -1;
}
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 84bdc2ee69e6..a443e136dc41 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
* @req: the request to prepare
*
* Allocate the necessary i2o_block_request struct and connect it to
- * the request. This is needed that we not loose the SG list later on.
+ * the request. This is needed that we not lose the SG list later on.
*
* Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
*/
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index be2b5926d26c..6e53a30bfd38 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
/**
* i2o_msg_get_wait - obtain an I2O message from the IOP
* @c: I2O controller
- * @msg: pointer to a I2O message pointer
* @wait: how long to wait until timeout
*
* This function waits up to wait seconds for a message slot to be
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c9e1242eaf25..5081b3981d3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -757,7 +757,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
* access to the device is prohibited.
*/
error = scsi_nonblockable_ioctl(sdp, cmd, p,
- (mode & FMODE_NDELAY_NOW) != 0);
+ (mode & FMODE_NDELAY) != 0);
if (!scsi_block_when_processing_errors(sdp) || !error)
return error;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 62b6633e3a97..45b66b98a516 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -521,7 +521,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
* if it doesn't recognise the ioctl
*/
ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
- (mode & FMODE_NDELAY_NOW) != 0);
+ (mode & FMODE_NDELAY) != 0);
if (ret != -ENODEV)
return ret;
return scsi_ioctl(sdev, cmd, argp);
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 6117d3db0b66..28c00c3d58f5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -591,8 +591,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
/* Update the per-port timeout */
uart_update_timeout(port, new->c_cflag, baud);
- /* Do our best to flush TX & RX, so we don't loose anything */
- /* But we don't wait indefinitly ! */
+ /* Do our best to flush TX & RX, so we don't lose anything */
+ /* But we don't wait indefinitely ! */
j = 5000000; /* Maximum wait */
/* FIXME Can't receive chars since set_termios might be called at early
* boot for the console, all stuff is not yet ready to receive at that
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 51d7bdea2869..aad1359a3eb1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1498,7 +1498,7 @@ static int ftdi_open(struct tty_struct *tty,
priv->interface, buf, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
- port->tty->termios - this would loose speed settings, etc.
+ port->tty->termios - this would lose speed settings, etc.
This is same behaviour as serial.c/rs_open() - Kuba */
/* ftdi_set_termios will send usb control messages */
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 26173a270e94..5b395a4ddfdf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -392,7 +392,7 @@ static int iTCO_wdt_stop(void)
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
val32 = inl(SMI_EN);
- val32 &= 0x00002000;
+ val32 |= 0x00002000;
outl(val32, SMI_EN);
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
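
The iTCO_wdt change is a one-character read-modify-write fix: 0x00002000 is bit 13 (TCO_EN), and the old mask cleared every other SMI enable as a side effect. A tiny illustration with an assumed register value:

	uint32_t val32 = 0x00000042;          /* illustrative SMI_EN contents */
	uint32_t before = val32 & 0x00002000; /* old code: 0x00000000 - every */
	                                      /* other SMI source disabled    */
	uint32_t after = val32 | 0x00002000;  /* fix: 0x00002042 - TCO_EN set */
	                                      /* and other enables preserved  */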