path: root/block
Diffstat (limited to 'block')
-rw-r--r--  block/badblocks.c  29
-rw-r--r--  block/blk-flush.c  28
-rw-r--r--  block/blk-mq.c      6
3 files changed, 58 insertions, 5 deletions
diff --git a/block/badblocks.c b/block/badblocks.c
index 7be53cb1cc3c..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
}
EXPORT_SYMBOL_GPL(badblocks_check);
+static void badblocks_update_acked(struct badblocks *bb)
+{
+ u64 *p = bb->page;
+ int i;
+ bool unacked = false;
+
+ if (!bb->unacked_exist)
+ return;
+
+ for (i = 0; i < bb->count ; i++) {
+ if (!BB_ACK(p[i])) {
+ unacked = true;
+ break;
+ }
+ }
+
+ if (!unacked)
+ bb->unacked_exist = 0;
+}
+
/**
* badblocks_set() - Add a range of bad blocks to the table.
* @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
+ else
+ badblocks_update_acked(bb);
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
@@ -354,7 +376,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
- if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+ if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +400,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
lo--;
}
while (lo >= 0 &&
- BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
@@ -399,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
}
}
+ badblocks_update_acked(bb);
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
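
The two added "BB_OFFSET(p[lo]) < target" tests above make badblocks_clear() trim a table entry only when that entry truly intersects the cleared span (the patch also adds badblocks_update_acked() so unacked_exist is dropped once no unacked entries remain). As a minimal standalone sketch of the intended overlap test — the helper name and the sector_t stand-in are hypothetical, not part of the patch:

#include <stdbool.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

/*
 * Sketch only: an entry [off, off + len) overlaps the cleared span
 * [s, target) only when it starts before target AND ends after s.
 * Testing just the "ends after" half lets an entry that begins at or
 * beyond target be trimmed by mistake, which is what the added
 * "BB_OFFSET(p[lo]) < target" checks prevent.
 */
static bool bb_range_overlaps(sector_t off, sector_t len,
			      sector_t s, sector_t target)
{
	return off < target && off + len > s;
}

Here target plays the role of the end of the cleared range (s + sectors) in badblocks_clear().
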
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68b9135..3c882cbc7541 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -343,6 +343,34 @@ static void flush_data_end_io(struct request *rq, int error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
/*
+ * Update q->in_flight[] here to make this tag usable again
+ * early, because blk_queue_start_tag() uses
+ * q->in_flight[BLK_RW_ASYNC] to limit async I/O and to
+ * reserve tags for sync I/O.
+ *
+ * More importantly, this avoids the following I/O
+ * deadlock:
+ *
+ * - suppose 40 FUA requests arrive at the flush queue
+ *   and the queue depth is 31
+ * - 30 rqs are scheduled, then blk_queue_start_tag() can no
+ *   longer allocate a tag for async I/O
+ * - all 30 rqs complete before FLUSH_PENDING_TIMEOUT
+ *   and flush_data_end_io() is called
+ * - without updating q->in_flight[BLK_RW_ASYNC] here, the
+ *   remaining rqs still cannot proceed; meanwhile they are
+ *   held in the flush data queue and the post flush rq
+ *   makes no progress
+ * - only after the post flush rq is handled can all these
+ *   rqs be completed
+
+ elv_completed_request(q, rq);
+
+ /* avoid double accounting */
+ rq->cmd_flags &= ~REQ_STARTED;
+
+ /*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
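
The comment added above pins the scenario to a queue depth of 31 with 30 tags already held. A toy model of that limit — the names and the exact reservation threshold are assumptions for illustration, not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	int depth;		/* total tags, 31 in the example above */
	int in_flight_async;	/* async requests currently holding tags */
};

/*
 * Hypothetical stand-in for the reservation blk_queue_start_tag()
 * applies via q->in_flight[BLK_RW_ASYNC]: keep headroom for sync I/O.
 * The exact threshold here is assumed for illustration.
 */
static bool toy_may_start_async(const struct toy_queue *q)
{
	return q->in_flight_async + 1 < q->depth;
}

int main(void)
{
	struct toy_queue q = { .depth = 31, .in_flight_async = 30 };

	/* prints 0: async allocation stalls until the in-flight count
	 * drops, which the early elv_completed_request() call allows */
	printf("can start async: %d\n", toy_may_start_async(&q));
	return 0;
}

Clearing REQ_STARTED afterwards matters because the same request is completed again later in the flush sequence; without it the later completion path could decrement the in-flight counter a second time.
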
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ddc2eed64771..f3d27a6dee09 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- hctx->queued++;
- data->hctx = hctx;
- data->ctx = ctx;
+ data->hctx = alloc_data.hctx;
+ data->ctx = alloc_data.ctx;
+ data->hctx->queued++;
return rq;
}
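
The blk-mq hunk stops caching hctx/ctx in locals captured before __blk_mq_alloc_request(): the allocation can update alloc_data (for instance if tag allocation has to retry on a different context), so the caller now re-reads alloc_data.hctx and alloc_data.ctx afterwards. A toy illustration of that general pattern, with hypothetical names and not kernel code:

#include <stdio.h>

struct toy_ctx { int id; };

struct toy_alloc_data { struct toy_ctx *ctx; };

static struct toy_ctx ctx_a = { 0 }, ctx_b = { 1 };

/* pretend the allocator had to retry and moved us to another context */
static void toy_alloc(struct toy_alloc_data *ad)
{
	ad->ctx = &ctx_b;
}

int main(void)
{
	struct toy_alloc_data ad = { .ctx = &ctx_a };
	struct toy_ctx *stale = ad.ctx;	/* snapshot taken too early */

	toy_alloc(&ad);
	/* stale->id is still 0; only the value re-read from ad is current */
	printf("stale=%d current=%d\n", stale->id, ad.ctx->id);
	return 0;
}
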