Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	109
1 file changed, 46 insertions(+), 63 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c36aa98fafa3..a824e49c0d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,9 +28,23 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
+#include <trace/block.h>
#include "blk.h"
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap); /* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
static int __make_request(struct request_queue *q, struct bio *bio);
/*
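
The DEFINE_TRACE() lines above instantiate tracepoints whose prototypes
live in the new <trace/block.h> header; consumers such as blktrace attach
probes at runtime instead of being compiled into the hot path. A minimal
sketch of the pattern, assuming the TPPROTO/TPARGS tracepoint API of this
era (the probe body is illustrative, not blktrace's actual code):

	/* in <trace/block.h>: declare the hook and its arguments */
	DECLARE_TRACE(block_plug,
		TPPROTO(struct request_queue *q),
		TPARGS(q));

	/* a consumer attaches and detaches a probe at runtime */
	static void probe_block_plug(struct request_queue *q)
	{
		/* e.g. emit a BLK_TA_PLUG trace event for q */
	}

	register_trace_block_plug(probe_block_plug);	/* start */
	unregister_trace_block_plug(probe_block_plug);	/* stop  */
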
@@ -139,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
nbytes = bio->bi_size;
}
+ if (unlikely(rq->cmd_flags & REQ_QUIET))
+ set_bit(BIO_QUIET, &bio->bi_flags);
+
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
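
The REQ_QUIET -> BIO_QUIET propagation above lets completion code higher
up the stack stay silent about failures the submitter already expects.
An illustrative downstream check (sketch; only the flag comes from this
hunk, the logging site is hypothetical):

	/* hypothetical completion-path logging site */
	if (error && !bio_flagged(bio, BIO_QUIET))
		printk(KERN_ERR "I/O error, sector %llu\n",
		       (unsigned long long)bio->bi_sector);
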
@@ -205,7 +222,7 @@ void blk_plug_device(struct request_queue *q)
if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
- blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+ trace_block_plug(q);
}
}
EXPORT_SYMBOL(blk_plug_device);
@@ -251,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
-
- if (!blk_remove_plug(q))
+ if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
return;
q->request_fn(q);
@@ -292,9 +308,7 @@ void blk_unplug_work(struct work_struct *work)
struct request_queue *q =
container_of(work, struct request_queue, unplug_work);
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
- q->rq.count[READ] + q->rq.count[WRITE]);
-
+ trace_block_unplug_io(q);
q->unplug_fn(q);
}
@@ -302,9 +316,7 @@ void blk_unplug_timeout(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
- q->rq.count[READ] + q->rq.count[WRITE]);
-
+ trace_block_unplug_timer(q);
kblockd_schedule_work(q, &q->unplug_work);
}
@@ -314,9 +326,7 @@ void blk_unplug(struct request_queue *q)
* devices don't necessarily have an ->unplug_fn defined
*/
if (q->unplug_fn) {
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
- q->rq.count[READ] + q->rq.count[WRITE]);
-
+ trace_block_unplug_io(q);
q->unplug_fn(q);
}
}
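
All three unplug call sites lose the pdu_int plumbing: the queued-request
count is no longer computed at the call site, so a probe that wants it
(as blktrace's BLK_TA_UNPLUG_IO/BLK_TA_UNPLUG_TIMER events do) derives it
from the queue itself. A sketch, assuming the one-argument probe
signature implied by trace_block_unplug_io(q); the count expression is
taken verbatim from the lines removed above:

	static void probe_block_unplug_io(struct request_queue *q)
	{
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];

		/* record a BLK_TA_UNPLUG_IO event carrying pdu */
	}
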
@@ -396,7 +406,8 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->unplug_timer);
- kblockd_flush_work(&q->unplug_work);
+ del_timer_sync(&q->timeout);
+ cancel_work_sync(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);
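
Two changes fold together here: blk_sync_queue() now also quiesces the
request-timeout timer (q->timeout), and it calls cancel_work_sync()
directly rather than through the kblockd_flush_work() wrapper, which the
tail of this patch removes. The resulting function, assembled from the
hunk above:

	void blk_sync_queue(struct request_queue *q)
	{
		del_timer_sync(&q->unplug_timer);
		del_timer_sync(&q->timeout);
		cancel_work_sync(&q->unplug_work);
	}
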
@@ -822,7 +833,7 @@ rq_starved:
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
- blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+ trace_block_getrq(q, bio, rw);
out:
return rq;
}
@@ -848,7 +859,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+ trace_block_sleeprq(q, bio, rw);
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
@@ -928,7 +939,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
{
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+ trace_block_rq_requeue(q, rq);
if (blk_rq_tagged(rq))
blk_queue_end_tag(q, rq);
@@ -1127,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
- int el_ret, nr_sectors, barrier, discard, err;
+ int el_ret, nr_sectors;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
int rw_flags;
@@ -1141,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);
- barrier = bio_barrier(bio);
- if (unlikely(barrier) && bio_has_data(bio) &&
- (q->next_ordered == QUEUE_ORDERED_NONE)) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
-
- discard = bio_discard(bio);
- if (unlikely(discard) && !q->prepare_discard_fn) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
-
spin_lock_irq(q->queue_lock);
- if (unlikely(barrier) || elv_queue_empty(q))
+ if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -1167,7 +1165,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (!ll_back_merge_fn(q, req, bio))
break;
- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+ trace_block_bio_backmerge(q, bio);
req->biotail->bi_next = bio;
req->biotail = bio;
@@ -1186,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (!ll_front_merge_fn(q, req, bio))
break;
- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+ trace_block_bio_frontmerge(q, bio);
bio->bi_next = req->bio;
req->bio = bio;
@@ -1242,18 +1240,14 @@ get_rq:
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
bio_flagged(bio, BIO_CPU_AFFINE))
req->cpu = blk_cpu_to_group(smp_processor_id());
- if (elv_queue_empty(q))
+ if (!blk_queue_nonrot(q) && elv_queue_empty(q))
blk_plug_device(q);
add_request(q, req);
out:
- if (sync)
+ if (sync || blk_queue_nonrot(q))
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
return 0;
-
-end_io:
- bio_endio(bio, err);
- return 0;
}
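
Independent of the tracepoint work, this hunk disables plugging for
non-rotational (SSD-class) queues: they gain nothing from batching
submissions, so the device is never plugged and the queue is kicked on
every request instead. blk_queue_nonrot() is a plain flag test, roughly
(sketch of the <linux/blkdev.h> definition in this era):

	#define blk_queue_nonrot(q)	\
		test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
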
/*
@@ -1269,7 +1263,7 @@ static inline void blk_partition_remap(struct bio *bio)
bio->bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
- blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+ trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev, bio->bi_sector,
bio->bi_sector - p->start_sect);
}
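
trace_block_remap() carries more context than the queue/bio pairs above,
so its declaration presumably takes the extra device and sector arguments
seen at this call site, along these lines (sketch inferred from the calls
in this patch, not copied from <trace/block.h>):

	DECLARE_TRACE(block_remap,
		TPPROTO(struct request_queue *q, struct bio *bio,
			dev_t dev, sector_t from, sector_t to),
		TPARGS(q, bio, dev, from, to));
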
@@ -1406,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio)
char b[BDEVNAME_SIZE];
q = bdev_get_queue(bio->bi_bdev);
- if (!q) {
+ if (unlikely(!q)) {
printk(KERN_ERR
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
-end_io:
- bio_endio(bio, err);
- break;
+ goto end_io;
}
if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1441,24 +1433,29 @@ end_io:
goto end_io;
if (old_sector != -1)
- blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+ trace_block_remap(q, bio, old_dev, bio->bi_sector,
old_sector);
- blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+ trace_block_bio_queue(q, bio);
old_sector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
- (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+ if (bio_discard(bio) && !q->prepare_discard_fn) {
err = -EOPNOTSUPP;
goto end_io;
}
ret = q->make_request_fn(q, bio);
} while (ret);
+
+ return;
+
+end_io:
+ bio_endio(bio, err);
}
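
The per-iteration end_io label inside the submission loop is replaced by
a single exit label after it, so every failure path (missing queue, EOD,
dead queue, unsupported discard) funnels through one bio_endio() call.
The resulting control flow, condensed from the hunks above:

	do {
		/* ... validate the bio, remap partitions, trace ... */
		if (bio_discard(bio) && !q->prepare_discard_fn) {
			err = -EOPNOTSUPP;
			goto end_io;
		}
		ret = q->make_request_fn(q, bio);
	} while (ret);

	return;

end_io:
	bio_endio(bio, err);
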
/*
@@ -1678,7 +1675,7 @@ static int __end_that_request_first(struct request *req, int error,
int total_bytes, bio_nbytes, next_idx = 0;
struct bio *bio;
- blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+ trace_block_rq_complete(req->q, req);
/*
* for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
@@ -1708,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error,
while ((bio = req->bio) != NULL) {
int nbytes;
- /*
- * For an empty barrier request, the low level driver must
- * store a potential error location in ->sector. We pass
- * that back up in ->bi_sector.
- */
- if (blk_empty_barrier(req))
- bio->bi_sector = req->sector;
-
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
@@ -2135,12 +2124,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
-void kblockd_flush_work(struct work_struct *work)
-{
- cancel_work_sync(work);
-}
-EXPORT_SYMBOL(kblockd_flush_work);
-
int __init blk_dev_init(void)
{
kblockd_workqueue = create_workqueue("kblockd");