Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig              6
-rw-r--r--  block/as-iosched.c        24
-rw-r--r--  block/blktrace.c          53
-rw-r--r--  block/cfq-iosched.c       67
-rw-r--r--  block/deadline-iosched.c   2
-rw-r--r--  block/elevator.c          30
-rw-r--r--  block/genhd.c             31
-rw-r--r--  block/ioctl.c              6
-rw-r--r--  block/ll_rw_blk.c        285
-rw-r--r--  block/noop-iosched.c       2
-rw-r--r--  block/scsi_ioctl.c        52
11 files changed, 386 insertions, 172 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 83766a6bdee2..a50f48111647 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -19,11 +19,9 @@ config BLOCK
if BLOCK
-#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
-#for instance.
config LBD
bool "Support for Large Block Devices"
- depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML
+ depends on !64BIT
help
Say Y here if you want to attach large (bigger than 2TB) discs to
your machine, or if you want to have a raid or loopback device
@@ -44,7 +42,7 @@ config BLK_DEV_IO_TRACE
config LSF
bool "Support for Large Single Files"
- depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
+ depends on !64BIT
help
Say Y here if you want to be able to handle very large files (bigger
than 2TB), otherwise say N.
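
On 32-bit architectures, CONFIG_LBD widens sector_t to 64 bits so the block layer can address devices beyond 2TB, and CONFIG_LSF does the same for the per-file block count type (blkcnt_t); on 64-bit architectures those types are 64-bit regardless, so the old per-arch whitelists collapse to a simple "depends on !64BIT".
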
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 50b95e4c1425..ef126277b4b3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
*
* FIXME! dispatch queue is not a queue at all!
*/
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
{
- struct request_queue *q = data;
+ struct as_data *ad = container_of(work, struct as_data, antic_work);
+ struct request_queue *q = ad->q;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -1317,7 +1318,7 @@ static void as_exit_queue(elevator_t *e)
/*
* initialize elevator private data (as_data).
*/
-static void *as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q)
{
struct as_data *ad;
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
ad->antic_timer.function = as_antic_timeout;
ad->antic_timer.data = (unsigned long)q;
init_timer(&ad->antic_timer);
- INIT_WORK(&ad->antic_work, as_work_handler, q);
+ INIT_WORK(&ad->antic_work, as_work_handler);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
@@ -1461,20 +1462,7 @@ static struct elevator_type iosched_as = {
static int __init as_init(void)
{
- int ret;
-
- ret = elv_register(&iosched_as);
- if (!ret) {
- /*
- * don't allow AS to get unregistered, since we would have
- * to browse all tasks in the system and release their
- * as_io_context first
- */
- __module_get(THIS_MODULE);
- return 0;
- }
-
- return ret;
+ return elv_register(&iosched_as);
}
static void __exit as_exit(void)
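
The as_work_handler() conversion above is one instance of a pattern applied throughout this diff (cfq_kick_queue() and blk_unplug_work() get the same treatment below): INIT_WORK() no longer takes a context pointer, so a work handler receives the work item itself and recovers its owning object with container_of(). A minimal sketch of the new-style pattern, with hypothetical names:

    #include <linux/workqueue.h>

    struct my_data {
            struct request_queue *q;
            struct work_struct work;
    };

    /* New-style handler: no void *data argument; the embedding
     * structure is recovered from the work item itself. */
    static void my_handler(struct work_struct *work)
    {
            struct my_data *d = container_of(work, struct my_data, work);
            /* ... operate on d->q ... */
    }

    static void my_init(struct my_data *d)
    {
            /* old API: INIT_WORK(&d->work, my_handler, d->q); */
            INIT_WORK(&d->work, my_handler);
    }
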
diff --git a/block/blktrace.c b/block/blktrace.c
index 135593c8e45b..d3679dd1d220 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -22,32 +22,60 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
+#include <linux/time.h>
#include <asm/uaccess.h>
static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;
/*
- * Send out a notify for this process, if we haven't done so since a trace
- * started
+ * Send out a notify message.
*/
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+ const void *data, size_t len)
{
struct blk_io_trace *t;
- t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
+ t = relay_reserve(bt->rchan, sizeof(*t) + len);
if (t) {
+ const int cpu = smp_processor_id();
+
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+ t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
t->device = bt->dev;
- t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
- t->pid = tsk->pid;
- t->cpu = smp_processor_id();
- t->pdu_len = sizeof(tsk->comm);
- memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
- tsk->btrace_seq = blktrace_seq;
+ t->action = action;
+ t->pid = pid;
+ t->cpu = cpu;
+ t->pdu_len = len;
+ memcpy((void *) t + sizeof(*t), data, len);
}
}
+/*
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+{
+ tsk->btrace_seq = blktrace_seq;
+ trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+}
+
+static void trace_note_time(struct blk_trace *bt)
+{
+ struct timespec now;
+ unsigned long flags;
+ u32 words[2];
+
+ getnstimeofday(&now);
+ words[0] = now.tv_sec;
+ words[1] = now.tv_nsec;
+
+ local_irq_save(flags);
+ trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
+ local_irq_restore(flags);
+}
+
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
pid_t pid)
{
@@ -366,8 +394,7 @@ err:
if (bt) {
if (bt->dropped_file)
debugfs_remove(bt->dropped_file);
- if (bt->sequence)
- free_percpu(bt->sequence);
+ free_percpu(bt->sequence);
if (bt->rchan)
relay_close(bt->rchan);
kfree(bt);
@@ -394,6 +421,8 @@ static int blk_trace_startstop(request_queue_t *q, int start)
blktrace_seq++;
smp_mb();
bt->trace_state = Blktrace_running;
+
+ trace_note_time(bt);
ret = 0;
}
} else {
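
The reworked trace_note() emits a fixed struct blk_io_trace header followed immediately by t->pdu_len payload bytes: the task comm for BLK_TN_PROCESS, or two u32 words for the new BLK_TN_TIMESTAMP record that blk_trace_startstop() now writes at trace start, so userspace can anchor the sched_clock()-based timestamps to wall-clock time. A sketch of how a consumer might decode one such record (field names per blktrace_api.h of this era; validation omitted):

    /* Hypothetical decoder for a notify record written by trace_note(). */
    static void parse_note(const struct blk_io_trace *t)
    {
            const void *payload = (const void *)(t + 1);

            if (t->action == BLK_TN_TIMESTAMP) {
                    const u32 *w = payload; /* pdu_len == 2 * sizeof(u32) */
                    printk(KERN_INFO "trace started at %u.%09u\n", w[0], w[1]);
            } else if (t->action == BLK_TN_PROCESS) {
                    printk(KERN_INFO "pid %u comm %.*s\n", t->pid,
                           (int)t->pdu_len, (const char *)payload);
            }
    }
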
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1d9c3c70a9a0..07b706243772 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125;
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
return !cfqd->busy_queues;
}
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
- if (rw == READ || rw == WRITE_SYNC)
+ /*
+ * Use the per-process queue for read requests and synchronous writes
+ */
+ if (!(rw & REQ_RW) || is_sync)
return task->pid;
return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
struct task_struct *tsk = current;
- pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+ pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
struct cfq_queue *cfqq;
cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -565,6 +568,33 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
cfq_remove_request(next);
}
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+ struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ const int rw = bio_data_dir(bio);
+ struct cfq_queue *cfqq;
+ pid_t key;
+
+ /*
+ * Disallow merge of a sync bio into an async request.
+ */
+ if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+ return 0;
+
+ /*
+ * Lookup the cfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+ */
+ key = cfq_queue_pid(current, rw, bio_sync(bio));
+ cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+
+ if (cfqq == RQ_CFQQ(rq))
+ return 1;
+
+ return 0;
+}
+
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
@@ -1464,8 +1494,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
}
static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
- struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
@@ -1617,7 +1646,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
cfq_update_io_thinktime(cfqd, cic);
- cfq_update_io_seektime(cfqd, cic, rq);
+ cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
@@ -1749,6 +1778,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
struct cfq_queue *cfqq;
+ unsigned int key;
+
+ key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
/*
* don't force setup of a queue from here, as a call to may_queue
@@ -1756,7 +1788,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
* so just lookup a possibly existing queue, or return 'may queue'
* if that fails
*/
- cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+ cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
if (cfqq) {
cfq_init_prio_data(cfqq);
cfq_prio_boost(cfqq);
@@ -1770,7 +1802,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
/*
* queue lock held here
*/
-static void cfq_put_request(request_queue_t *q, struct request *rq)
+static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1799,10 +1831,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
struct task_struct *tsk = current;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
- pid_t key = cfq_queue_pid(tsk, rw);
+ const int is_sync = rq_is_sync(rq);
+ pid_t key = cfq_queue_pid(tsk, rw, is_sync);
struct cfq_queue *cfqq;
unsigned long flags;
- int is_sync = key != CFQ_KEY_ASYNC;
might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -1841,9 +1873,11 @@ queue_fail:
return 1;
}
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
{
- request_queue_t *q = data;
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -1951,7 +1985,7 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
@@ -1987,7 +2021,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2118,6 +2152,7 @@ static struct elevator_type iosched_cfq = {
.elevator_merge_fn = cfq_merge,
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
+ .elevator_allow_merge_fn = cfq_allow_merge,
.elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
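
Two related changes run through the cfq hunks above. First, cfq_queue_pid() now takes an explicit sync flag: reads and synchronous writes are keyed to the issuing task's per-process queue, while async (buffered) writes all share CFQ_KEY_ASYNC, since those are typically submitted by pdflush rather than by the task that dirtied the data. Second, the new cfq_allow_merge() hook, exported through elevator_allow_merge_fn, applies the same key to veto merges that would cross queues. For reference, the key rule restated on its own:

    /* Restatement of the cfq_queue_pid() rule used by the queue lookup,
     * cfq_may_queue() and cfq_allow_merge() alike. */
    static inline pid_t cfq_key(struct task_struct *task, int rw, int is_sync)
    {
            if (!(rw & REQ_RW) || is_sync)  /* a read, or a sync write */
                    return task->pid;       /* per-process queue */
            return CFQ_KEY_ASYNC;           /* one shared async queue */
    }
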
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b7c5b34cb7b4..6d673e938d3e 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q)
{
struct deadline_data *dd;
diff --git a/block/elevator.c b/block/elevator.c
index 8ccd163254b8..536be740ba4e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6;
#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
/*
+ * Query the io scheduler to see if the bio the current process is issuing
+ * may be merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+ request_queue_t *q = rq->q;
+ elevator_t *e = q->elevator;
+
+ if (e->ops->elevator_allow_merge_fn)
+ return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+ return 1;
+}
+
+/*
* can we safely merge with this request?
*/
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
return 0;
/*
- * same device and no special stuff set, merge is ok
+ * must be same device and not a special request
*/
- if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
- return 1;
+ if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+ return 0;
- return 0;
+ if (!elv_iosched_allow_merge(rq, bio))
+ return 0;
+
+ return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
@@ -129,7 +147,7 @@ static struct elevator_type *elevator_get(const char *name)
static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
- return eq->ops->elevator_init_fn(q, eq);
+ return eq->ops->elevator_init_fn(q);
}
static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
@@ -810,7 +828,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
elevator_t *e = q->elevator;
if (e->ops->elevator_put_req_fn)
- e->ops->elevator_put_req_fn(q, rq);
+ e->ops->elevator_put_req_fn(rq);
}
int elv_may_queue(request_queue_t *q, int rw)
diff --git a/block/genhd.c b/block/genhd.c
index 653919d50cd4..457fdac4c17d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -417,6 +417,34 @@ static struct disk_attribute disk_attr_stat = {
.show = disk_stats_read
};
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+
+static ssize_t disk_fail_store(struct gendisk * disk,
+ const char *buf, size_t count)
+{
+ int i;
+
+ if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+ if (i == 0)
+ disk->flags &= ~GENHD_FL_FAIL;
+ else
+ disk->flags |= GENHD_FL_FAIL;
+ }
+
+ return count;
+}
+static ssize_t disk_fail_read(struct gendisk * disk, char *page)
+{
+ return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
+}
+static struct disk_attribute disk_attr_fail = {
+ .attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR },
+ .store = disk_fail_store,
+ .show = disk_fail_read
+};
+
+#endif
+
static struct attribute * default_attrs[] = {
&disk_attr_uevent.attr,
&disk_attr_dev.attr,
@@ -424,6 +452,9 @@ static struct attribute * default_attrs[] = {
&disk_attr_removable.attr,
&disk_attr_size.attr,
&disk_attr_stat.attr,
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ &disk_attr_fail.attr,
+#endif
NULL,
};
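
Combined with should_fail_request() in the ll_rw_blk.c hunks below, this wires the disk into the fault-injection framework: writing 1 to the new /sys/block/<disk>/make-it-fail attribute sets GENHD_FL_FAIL, after which bios for that disk may be failed according to the shared fail_make_request fault_attr (tunable via the fail_make_request= boot parameter set up below).
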
diff --git a/block/ioctl.c b/block/ioctl.c
index 58aab630dfc1..f6962b64660e 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -72,7 +72,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
bdevp = bdget_disk(disk, part);
if (!bdevp)
return -ENOMEM;
- mutex_lock_nested(&bdevp->bd_mutex, BD_MUTEX_PARTITION);
+ mutex_lock(&bdevp->bd_mutex);
if (bdevp->bd_openers) {
mutex_unlock(&bdevp->bd_mutex);
bdput(bdevp);
@@ -82,7 +82,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
fsync_bdev(bdevp);
invalidate_bdev(bdevp, 0);
- mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_WHOLE);
+ mutex_lock(&bdev->bd_mutex);
delete_partition(disk, part);
mutex_unlock(&bdev->bd_mutex);
mutex_unlock(&bdevp->bd_mutex);
@@ -290,7 +290,7 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
- struct block_device *bdev = file->f_dentry->d_inode->i_bdev;
+ struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
int ret = -ENOIOCTLCMD;
if (disk->fops->compat_ioctl) {
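
Both ioctl.c hunks track tree-wide API changes rather than block-layer logic: the bd_mutex lockdep annotations (mutex_lock_nested() with BD_MUTEX_* subclasses) were retired in favour of plain mutex_lock(), and file->f_dentry gave way to file->f_path.dentry once the dentry and vfsmount were bundled together in struct path.
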
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9eaee6640535..fb6789725e1b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -25,16 +25,18 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
+#include <linux/fault-inject.h>
/*
* for max sense size
*/
#include <scsi/scsi_cmnd.h>
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -44,17 +46,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
* For the allocated request tables
*/
-static kmem_cache_t *request_cachep;
+static struct kmem_cache *request_cachep;
/*
* For queue allocation
*/
-static kmem_cache_t *requestq_cachep;
+static struct kmem_cache *requestq_cachep;
/*
* For io context allocations
*/
-static kmem_cache_t *iocontext_cachep;
+static struct kmem_cache *iocontext_cachep;
/*
* Controlling structure to kblockd
@@ -127,13 +129,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
-void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
-{
- q->activity_fn = fn;
- q->activity_data = data;
-}
-EXPORT_SYMBOL(blk_queue_activity_fn);
-
/**
* blk_queue_prep_rq - set a prepare_request function for queue
* @q: queue
@@ -227,7 +222,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
if (q->unplug_delay == 0)
q->unplug_delay = 1;
- INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+ INIT_WORK(&q->unplug_work, blk_unplug_work);
q->unplug_timer.function = blk_unplug_timeout;
q->unplug_timer.data = (unsigned long)q;
@@ -236,8 +231,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
* by default assume old behaviour and bounce for any highmem page
*/
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
- blk_queue_activity_fn(q, NULL, NULL);
}
EXPORT_SYMBOL(blk_queue_make_request);
@@ -1412,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
return 1;
}
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
- struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
{
unsigned short max_sectors;
int len;
@@ -1449,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
return ll_new_hw_segment(q, req, bio);
}
+EXPORT_SYMBOL(ll_back_merge_fn);
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
@@ -1631,9 +1624,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
}
}
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
{
- request_queue_t *q = data;
+ request_queue_t *q = container_of(work, request_queue_t, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1919,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
}
q->request_fn = rfn;
- q->back_merge_fn = ll_back_merge_fn;
- q->front_merge_fn = ll_front_merge_fn;
- q->merge_requests_fn = ll_merge_requests_fn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
@@ -2065,15 +2055,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = NULL;
+ const int rw = rw_flags & 0x01;
int may_queue, priv;
- may_queue = elv_may_queue(q, rw);
+ may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -2121,7 +2112,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, priv, gfp_mask);
+ rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -2169,12 +2160,13 @@ out:
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
struct bio *bio)
{
+ const int rw = rw_flags & 0x01;
struct request *rq;
- rq = get_request(q, rw, bio, GFP_NOIO);
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
while (!rq) {
DEFINE_WAIT(wait);
struct request_list *rl = &q->rq;
@@ -2182,7 +2174,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, bio, GFP_NOIO);
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
@@ -2322,6 +2314,73 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
EXPORT_SYMBOL(blk_insert_request);
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+ int ret = 0;
+
+ if (bio) {
+ if (bio_flagged(bio, BIO_USER_MAPPED))
+ bio_unmap_user(bio);
+ else
+ ret = bio_uncopy_user(bio);
+ }
+
+ return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+ void __user *ubuf, unsigned int len)
+{
+ unsigned long uaddr;
+ struct bio *bio, *orig_bio;
+ int reading, ret;
+
+ reading = rq_data_dir(rq) == READ;
+
+ /*
+ * if alignment requirement is satisfied, map in user pages for
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+ if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+ bio = bio_map_user(q, NULL, uaddr, len, reading);
+ else
+ bio = bio_copy_user(q, uaddr, len, reading);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ orig_bio = bio;
+ blk_queue_bounce(q, &bio);
+
+ /*
+ * We link the bounce buffer in and could have to traverse it
+ * later so we have to get a ref to prevent it from being freed
+ */
+ bio_get(bio);
+
+ if (!rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+ else if (!ll_back_merge_fn(q, rq, bio)) {
+ ret = -EINVAL;
+ goto unmap_bio;
+ } else {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+
+ rq->data_len += bio->bi_size;
+ }
+
+ return bio->bi_size;
+
+unmap_bio:
+	/* if it was bounced we must call the end io function */
+ bio_endio(bio, bio->bi_size, 0);
+ __blk_rq_unmap_user(orig_bio);
+ bio_put(bio);
+ return ret;
+}
+
/**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
@@ -2343,42 +2402,47 @@ EXPORT_SYMBOL(blk_insert_request);
* unmapping.
*/
int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
- unsigned int len)
+ unsigned long len)
{
- unsigned long uaddr;
- struct bio *bio;
- int reading;
+ unsigned long bytes_read = 0;
+ struct bio *bio = NULL;
+ int ret;
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !ubuf)
return -EINVAL;
- reading = rq_data_dir(rq) == READ;
+ while (bytes_read != len) {
+ unsigned long map_len, end, start;
- /*
- * if alignment requirement is satisfied, map in user pages for
- * direct dma. else, set up kernel bounce buffers
- */
- uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, reading);
- else
- bio = bio_copy_user(q, uaddr, len, reading);
+ map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+ end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
+ start = (unsigned long)ubuf >> PAGE_SHIFT;
- if (!IS_ERR(bio)) {
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
+ /*
+ * A bad offset could cause us to require BIO_MAX_PAGES + 1
+ * pages. If this happens we just lower the requested
+ * mapping len by a page so that we can fit
+ */
+ if (end - start > BIO_MAX_PAGES)
+ map_len -= PAGE_SIZE;
- rq->buffer = rq->data = NULL;
- rq->data_len = len;
- return 0;
+ ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+ if (ret < 0)
+ goto unmap_rq;
+ if (!bio)
+ bio = rq->bio;
+ bytes_read += ret;
+ ubuf += ret;
}
- /*
- * bio is the err-ptr
- */
- return PTR_ERR(bio);
+ rq->buffer = rq->data = NULL;
+ return 0;
+unmap_rq:
+ blk_rq_unmap_user(bio);
+ return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
@@ -2389,6 +2453,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* @rq: request to map data to
* @iov: pointer to the iovec
* @iov_count: number of elements in the iovec
+ * @len: I/O byte count
*
* Description:
* Data will be mapped directly for zero copy io, if possible. Otherwise
@@ -2404,7 +2469,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
- struct sg_iovec *iov, int iov_count)
+ struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
@@ -2418,10 +2483,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- rq->bio = rq->biotail = bio;
+ if (bio->bi_size != len) {
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio);
+ return -EINVAL;
+ }
+
+ bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
- rq->data_len = bio->bi_size;
return 0;
}
@@ -2429,24 +2499,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
- * @bio: bio to be unmapped
- * @ulen: length of user buffer
+ * @bio: start of bio list
*
* Description:
- * Unmap a bio previously mapped by blk_rq_map_user().
+ * Unmap a request previously mapped by blk_rq_map_user(). The caller must
+ * supply the original rq->bio, as saved after blk_rq_map_user() returned,
+ * since io completion may have changed rq->bio.
*/
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio)
{
- int ret = 0;
+ struct bio *mapped_bio;
+ int ret = 0, ret2;
- if (bio) {
- if (bio_flagged(bio, BIO_USER_MAPPED))
- bio_unmap_user(bio);
- else
- ret = bio_uncopy_user(bio);
+ while (bio) {
+ mapped_bio = bio;
+ if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
+ mapped_bio = bio->bi_private;
+
+ ret2 = __blk_rq_unmap_user(mapped_bio);
+ if (ret2 && !ret)
+ ret = ret2;
+
+ mapped_bio = bio;
+ bio = bio->bi_next;
+ bio_put(mapped_bio);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2476,11 +2555,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
- rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
-
rq->buffer = rq->data = NULL;
- rq->data_len = len;
return 0;
}
@@ -2609,9 +2685,6 @@ static inline void add_request(request_queue_t * q, struct request * req)
{
drive_stat_acct(req, req->nr_sectors, 1);
- if (q->activity_fn)
- q->activity_fn(q->activity_data, rq_data_dir(req));
-
/*
* elevator indicated where it wants this request to be
* inserted at elevator_merge time
@@ -2745,7 +2818,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
* will have updated segment counts, update sector
* counts here.
*/
- if (!q->merge_requests_fn(q, req, next))
+ if (!ll_merge_requests_fn(q, req, next))
return 0;
/*
@@ -2835,6 +2908,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
int el_ret, nr_sectors, barrier, err;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
+ int rw_flags;
nr_sectors = bio_sectors(bio);
@@ -2861,7 +2935,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
case ELEVATOR_BACK_MERGE:
BUG_ON(!rq_mergeable(req));
- if (!q->back_merge_fn(q, req, bio))
+ if (!ll_back_merge_fn(q, req, bio))
break;
blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2878,7 +2952,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
case ELEVATOR_FRONT_MERGE:
BUG_ON(!rq_mergeable(req));
- if (!q->front_merge_fn(q, req, bio))
+ if (!ll_front_merge_fn(q, req, bio))
break;
blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
@@ -2909,10 +2983,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
get_rq:
/*
+ * This sync check and mask will be re-done in init_request_from_bio(),
+ * but we need to set it earlier to expose the sync flag to the
+ * rq allocator and io schedulers.
+ */
+ rw_flags = bio_data_dir(bio);
+ if (sync)
+ rw_flags |= REQ_RW_SYNC;
+
+ /*
* Grab a free request. This might sleep but cannot fail.
* Returns with the queue unlocked.
*/
- req = get_request_wait(q, bio_data_dir(bio), bio);
+ req = get_request_wait(q, rw_flags, bio);
/*
* After dropping the lock and possibly sleeping here, our request
@@ -2971,6 +3054,42 @@ static void handle_bad_sector(struct bio *bio)
set_bit(BIO_EOF, &bio->bi_flags);
}
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_make_request);
+
+static int __init setup_fail_make_request(char *str)
+{
+ return setup_fault_attr(&fail_make_request, str);
+}
+__setup("fail_make_request=", setup_fail_make_request);
+
+static int should_fail_request(struct bio *bio)
+{
+ if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
+ (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
+ return should_fail(&fail_make_request, bio->bi_size);
+
+ return 0;
+}
+
+static int __init fail_make_request_debugfs(void)
+{
+ return init_fault_attr_dentries(&fail_make_request,
+ "fail_make_request");
+}
+
+late_initcall(fail_make_request_debugfs);
+
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+
+static inline int should_fail_request(struct bio *bio)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
/**
* generic_make_request: hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
@@ -3056,6 +3175,9 @@ end_io:
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
goto end_io;
+ if (should_fail_request(bio))
+ goto end_io;
+
/*
* If this device has partitions, remap block n
* of partition p to block n+start(p) of the disk.
@@ -3110,10 +3232,12 @@ void submit_bio(int rw, struct bio *bio)
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw |= rw;
- if (rw & WRITE)
+ if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
- else
+ } else {
+ task_io_account_read(bio->bi_size);
count_vm_events(PGPGIN, count);
+ }
if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
@@ -3374,8 +3498,6 @@ static void blk_done_softirq(struct softirq_action *h)
}
}
-#ifdef CONFIG_HOTPLUG_CPU
-
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
@@ -3401,8 +3523,6 @@ static struct notifier_block __devinitdata blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
-#endif /* CONFIG_HOTPLUG_CPU */
-
/**
* blk_complete_request - end I/O on a request
* @req: the request being processed
@@ -3495,6 +3615,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio);
+ rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
}
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 79af43179421..1c3de2b9a6b5 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q)
{
struct noop_data *nd;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e55a75621437..2528a0c0dec8 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -226,9 +226,9 @@ static int sg_io(struct file *file, request_queue_t *q,
unsigned long start_time;
int writing = 0, ret = 0;
struct request *rq;
- struct bio *bio;
char sense[SCSI_SENSE_BUFFERSIZE];
unsigned char cmd[BLK_MAX_CDB];
+ struct bio *bio;
if (hdr->interface_id != 'S')
return -EINVAL;
@@ -258,6 +258,25 @@ static int sg_io(struct file *file, request_queue_t *q,
if (!rq)
return -ENOMEM;
+ /*
+ * fill in request structure
+ */
+ rq->cmd_len = hdr->cmd_len;
+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ memcpy(rq->cmd, cmd, hdr->cmd_len);
+
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+	rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_TIMEOUT;
+
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
struct sg_iovec *iov;
@@ -274,7 +293,8 @@ static int sg_io(struct file *file, request_queue_t *q,
goto out;
}
- ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+ ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+ hdr->dxfer_len);
kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
@@ -282,33 +302,7 @@ static int sg_io(struct file *file, request_queue_t *q,
if (ret)
goto out;
- /*
- * fill in request structure
- */
- rq->cmd_len = hdr->cmd_len;
- memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
- memcpy(rq->cmd, cmd, hdr->cmd_len);
-
- memset(sense, 0, sizeof(sense));
- rq->sense = sense;
- rq->sense_len = 0;
-
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
bio = rq->bio;
-
- /*
- * bounce this after holding a reference to the original bio, it's
- * needed for proper unmapping
- */
- if (rq->bio)
- blk_queue_bounce(q, &rq->bio);
-
- rq->timeout = (hdr->timeout * HZ) / 1000;
- if (!rq->timeout)
- rq->timeout = q->sg_timeout;
- if (!rq->timeout)
- rq->timeout = BLK_DEFAULT_TIMEOUT;
-
rq->retries = 0;
start_time = jiffies;
@@ -339,7 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
hdr->sb_len_wr = len;
}
- if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+ if (blk_rq_unmap_user(bio))
ret = -EFAULT;
/* may not have succeeded, but output values written to control