author    Jens Axboe <jens.axboe@oracle.com>  2007-07-19 10:13:01 +0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 21:22:44 +0400
commit    41e9d344bf52c57ec16648d08618b61d3f1d4bdc
tree      7d55fc863731bc669fb9ff1d50ae7c5524ef91d6 /drivers/ide
parent    275afcac9953ece0828972edeab9684cfe1a5ef3
download  linux-41e9d344bf52c57ec16648d08618b61d3f1d4bdc.tar.xz
IDE: fix termination of non-fs requests
ide-disk calls ide_end_request(drive, 0, 0); to finish an unknown request, but this doesn't work so well for non-fs requests, since ide_end_request() internally looks at ->hard_cur_sectors to see how much data to end. Only file system requests store a transfer value in there; pc requests fill out ->data_len as a byte-based transfer value instead. Since we ask to end 0 bytes of such a request, it is never terminated, and ide-disk gets stuck in a loop "handling" that same request over and over.

Switch __ide_end_request() to take a byte-based transfer count, and adjust ide_end_request() to look at the right field to determine how much I/O to end when it is passed 0.

Acked-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Tested-by: Giacomo Catenazzi <cate@debian.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
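The core of the fix is the byte-count fallback in ide_end_request(): a caller passing 0 now gets ->data_len for a pc request and ->hard_cur_sectors << 9 for an fs request, instead of silently ending 0 bytes. Below is a minimal user-space sketch of that fallback, assuming a simplified request structure; struct req_sketch, its is_pc flag and resolve_nr_bytes() are illustrative stand-ins, not the real block-layer definitions. The actual change follows in the diff.

/*
 * Sketch of the byte-count fallback the patch adds to ide_end_request().
 * The struct and the is_pc flag are simplified assumptions standing in
 * for struct request and blk_pc_request(); they are not kernel code.
 */
#include <stdio.h>

struct req_sketch {
	int is_pc;                      /* stands in for blk_pc_request(rq) */
	unsigned int data_len;          /* byte count, filled for pc requests */
	unsigned long hard_cur_sectors; /* 512-byte sectors, filled for fs requests */
};

/* Turn "end 0 sectors" into a real byte count, as the patched code does. */
static unsigned int resolve_nr_bytes(const struct req_sketch *rq, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;

	if (!nr_bytes) {
		if (rq->is_pc)
			nr_bytes = rq->data_len;              /* byte-based pc request */
		else
			nr_bytes = rq->hard_cur_sectors << 9; /* fs request: sectors -> bytes */
	}
	return nr_bytes;
}

int main(void)
{
	struct req_sketch pc = { .is_pc = 1, .data_len = 4096 };
	struct req_sketch fs = { .is_pc = 0, .hard_cur_sectors = 8 };

	/* Before the fix, a pc request resolved to 0 bytes and was never ended. */
	printf("pc request ends %u bytes\n", resolve_nr_bytes(&pc, 0)); /* 4096 */
	printf("fs request ends %u bytes\n", resolve_nr_bytes(&fs, 0)); /* 4096 */
	return 0;
}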
Diffstat (limited to 'drivers/ide')
-rw-r--r--  drivers/ide/ide-io.c  17
1 file changed, 11 insertions, 6 deletions
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index c5b5011da56e..f9de79844418 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,7 +55,7 @@
#include <asm/bitops.h>
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
- int uptodate, int nr_sectors)
+ int uptodate, unsigned int nr_bytes)
{
int ret = 1;
@@ -64,7 +64,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
* complete the whole request right now
*/
if (blk_noretry_request(rq) && end_io_error(uptodate))
- nr_sectors = rq->hard_nr_sectors;
+ nr_bytes = rq->hard_nr_sectors << 9;
if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
rq->errors = -EIO;
@@ -78,7 +78,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
HWGROUP(drive)->hwif->ide_dma_on(drive);
}
- if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+ if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
add_disk_randomness(rq->rq_disk);
if (!list_empty(&rq->queuelist))
blkdev_dequeue_request(rq);
@@ -103,6 +103,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
+ unsigned int nr_bytes = nr_sectors << 9;
struct request *rq;
unsigned long flags;
int ret = 1;
@@ -114,10 +115,14 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
spin_lock_irqsave(&ide_lock, flags);
rq = HWGROUP(drive)->rq;
- if (!nr_sectors)
- nr_sectors = rq->hard_cur_sectors;
+ if (!nr_bytes) {
+ if (blk_pc_request(rq))
+ nr_bytes = rq->data_len;
+ else
+ nr_bytes = rq->hard_cur_sectors << 9;
+ }
- ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+ ret = __ide_end_request(drive, rq, uptodate, nr_bytes);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;