author     Andreas Gruenbacher <agruenba@redhat.com>  2018-07-24 21:02:40 +0300
committer  Andreas Gruenbacher <agruenba@redhat.com>  2018-07-24 21:02:40 +0300
commit     a3479c7fc096a1a7a2dccbfbdc6fcf86b805711a (patch)
tree       85b0044b207d05b0cf5f118f2160a8de996073ce /block/blk-mq.c
parent     109dbb1e6f27fb8f80ee61953485c7c3b1717951 (diff)
parent     025d0e7f73c6a9cc3ca2fe7de821792a8f3269bf (diff)
download   linux-a3479c7fc096a1a7a2dccbfbdc6fcf86b805711a.tar.xz
Merge branch 'iomap-write' into linux-gfs2/for-next
Pull in the gfs2 iomap-write changes: Tweak the existing code to properly
support iomap write and eliminate an unnecessary special case in
gfs2_block_map. Implement iomap write support for buffered and direct I/O.
Simplify some of the existing code and eliminate code that is no longer
used:

  gfs2: Remove gfs2_write_{begin,end}
  gfs2: iomap direct I/O support
  gfs2: gfs2_extent_length cleanup
  gfs2: iomap buffered write support
  gfs2: Further iomap cleanups

This is based on the following changes on the xfs 'iomap-4.19-merge'
branch:

  iomap: add private pointer to struct iomap
  iomap: add a page_done callback
  iomap: generic inline data handling
  iomap: complete partial direct I/O writes synchronously
  iomap: mark newly allocated buffer heads as new
  fs: factor out a __generic_write_end helper

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
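For background on the iomap write path referenced above: the filesystem
supplies a struct iomap_ops whose iomap_begin/iomap_end callbacks map (and,
for writes, allocate) blocks, while the copy loop itself lives in the generic
code in fs/iomap.c. A minimal sketch, assuming the 4.18-era API; the my_*
names below are illustrative, not gfs2's actual code:

	#include <linux/fs.h>
	#include <linux/iomap.h>
	#include <linux/uio.h>

	/* Hypothetical callbacks; signatures follow the 4.18-era
	 * struct iomap_ops.  A real filesystem maps or allocates
	 * blocks for the given range here. */
	static int my_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
				  unsigned flags, struct iomap *iomap);
	static int my_iomap_end(struct inode *inode, loff_t pos, loff_t length,
				ssize_t written, unsigned flags, struct iomap *iomap);

	static const struct iomap_ops my_iomap_ops = {
		.iomap_begin	= my_iomap_begin,
		.iomap_end	= my_iomap_end,
	};

	static ssize_t my_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		inode_lock(inode);
		/* The generic iomap code performs the page-cache copy,
		 * calling back into my_iomap_ops for each extent. */
		ret = iomap_file_buffered_write(iocb, from, &my_iomap_ops);
		inode_unlock(inode);
		return ret;
	}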
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d2de0a719ab8..70c65bb6c013 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 	if (blk_mq_request_started(rq)) {
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+		rq->rq_flags &= ~RQF_TIMED_OUT;
 		if (q->dma_drain_size && blk_rq_bytes(rq))
 			rq->nr_phys_segments--;
 	}
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
+	req->rq_flags |= RQF_TIMED_OUT;
 	if (req->q->mq_ops->timeout) {
 		enum blk_eh_timer_return ret;
 
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
 	}
 
+	req->rq_flags &= ~RQF_TIMED_OUT;
 	blk_add_timer(req);
 }
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
 		return false;
+	if (rq->rq_flags & RQF_TIMED_OUT)
+		return false;
 
 	deadline = blk_rq_deadline(rq);
 	if (time_after_eq(jiffies, deadline))
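Taken together, the three hunks above close a re-timeout race: a request is
flagged RQF_TIMED_OUT while the driver's ->timeout() callback runs, the
expiry scan skips flagged requests, and the flag is cleared again when the
timer is re-armed or the request is requeued. A condensed paraphrase of the
flag's lifecycle, stitched together from this diff (no new API):

	/* blk_mq_rq_timed_out(): flag the request for the duration of
	 * the driver ->timeout() callback; clear the flag again if the
	 * timer is re-armed. */
	req->rq_flags |= RQF_TIMED_OUT;
	/* ... driver ->timeout() runs here ... */
	req->rq_flags &= ~RQF_TIMED_OUT;
	blk_add_timer(req);

	/* blk_mq_req_expired(): never report a request as expired while
	 * its timeout handler is still in progress. */
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	/* __blk_mq_requeue_request(): a requeue also resets the flag. */
	rq->rq_flags &= ~RQF_TIMED_OUT;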
@@ -1903,7 +1908,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	if (!tags)
 		return NULL;
 
-	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
+	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 				 node);
 	if (!tags->rqs) {
@@ -1911,9 +1916,9 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 		return NULL;
 	}
 
-	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
-					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
-					node);
+	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
+					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+					node);
 	if (!tags->static_rqs) {
 		kfree(tags->rqs);
 		blk_mq_free_tags(tags);
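This hunk (and the allocation hunks below) converts open-coded
kzalloc_node(n * size, ...) calls to kcalloc_node(n, size, ...). The result
is the same zeroed array, but kcalloc_node() checks the element-count
multiplication for overflow and returns NULL instead of quietly allocating a
short buffer. A standalone sketch of the pattern; my_alloc_rq_table is an
illustrative name, not from this file:

	#include <linux/slab.h>
	#include <linux/blkdev.h>

	/* Illustrative only: overflow-checked, zeroed array allocation. */
	static struct request **my_alloc_rq_table(unsigned int nr_tags, int node)
	{
		/* If nr_tags * sizeof(struct request *) would overflow,
		 * kcalloc_node() fails cleanly with NULL, unlike the
		 * open-coded multiplication passed to kzalloc_node(). */
		return kcalloc_node(nr_tags, sizeof(struct request *),
				    GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				    node);
	}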
@@ -2349,7 +2354,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 	mutex_lock(&set->tag_list_lock);
 	list_del_rcu(&q->tag_set_list);
-	INIT_LIST_HEAD(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2357,8 +2361,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_depth(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
-
 	synchronize_rcu();
+	INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
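The two hunks above fix the ordering of RCU list removal: INIT_LIST_HEAD()
used to reinitialize q->tag_set_list immediately after list_del_rcu(), while
concurrent readers could still be traversing the tag list through the removed
entry; the reinitialization now waits until after synchronize_rcu(). The safe
shape of the pattern, with illustrative names (my_lock, my_entry):

	/* list_del_rcu() leaves the removed entry's forward pointer
	 * intact so readers under rcu_read_lock() can finish walking
	 * the list. */
	mutex_lock(&my_lock);
	list_del_rcu(&my_entry->node);
	mutex_unlock(&my_lock);

	/* Only after every pre-existing reader is done may the entry
	 * be reinitialized or reused. */
	synchronize_rcu();
	INIT_LIST_HEAD(&my_entry->node);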
@@ -2522,7 +2526,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	/* init q->mq_kobj and sw queues' kobjects */
 	blk_mq_sysfs_init(q);
 
-	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
+	q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
 					GFP_KERNEL, set->numa_node);
 	if (!q->queue_hw_ctx)
 		goto err_percpu;
@@ -2741,14 +2745,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->nr_hw_queues > nr_cpu_ids)
 		set->nr_hw_queues = nr_cpu_ids;
 
-	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
+	set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
 				 GFP_KERNEL, set->numa_node);
 	if (!set->tags)
 		return -ENOMEM;
 
 	ret = -ENOMEM;
-	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
-				   GFP_KERNEL, set->numa_node);
+	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
+				   GFP_KERNEL, set->numa_node);
 	if (!set->mq_map)
 		goto out_free_tags;