Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig        |  1
-rw-r--r--  block/bfq-iosched.c  |  5
-rw-r--r--  block/bfq-wf2q.c     | 13
-rw-r--r--  block/blk-core.c     | 24
-rw-r--r--  block/blk-mq.c       | 10
-rw-r--r--  block/blk-stat.c     | 17
-rw-r--r--  block/elevator.c     |  5
7 files changed, 44 insertions, 31 deletions
diff --git a/block/Kconfig b/block/Kconfig
index a8ad7e77db28..89cd28f8d051 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -6,7 +6,6 @@ menuconfig BLOCK
default y
select SBITMAP
select SRCU
- select DAX
help
Provide block layer support for the kernel.
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index bd8499ef157c..08ce45096350 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -56,6 +56,11 @@
* rotational or flash-based devices, and to get the job done quickly
* for applications consisting of many I/O-bound processes.
*
+ * NOTE: if the main or only goal, with a given device, is to achieve
+ * the maximum-possible throughput at all times, then do switch off
+ * all low-latency heuristics for that device, by setting low_latency
+ * to 0.
+ *
* BFQ is described in [1], where also a reference to the initial, more
* theoretical paper on BFQ can be found. The interested reader can find
* in the latter paper full details on the main algorithm, as well as
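
The new header comment is a tuning hint: BFQ's low-latency heuristics trade some peak throughput for responsiveness, and they can be switched off per device through the scheduler's low_latency attribute. A minimal user-space sketch of doing so ("sda" is a placeholder; the attribute exists only while BFQ is the active scheduler for that queue):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Equivalent to: echo 0 > /sys/block/sda/queue/iosched/low_latency */
	int fd = open("/sys/block/sda/queue/iosched/low_latency", O_WRONLY);

	if (fd < 0 || write(fd, "0", 1) < 0) {
		perror("low_latency");
		return 1;
	}
	close(fd);
	return 0;
}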
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index b4fc3e4260b7..8726ede19eef 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1114,12 +1114,21 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
struct bfq_sched_data *sd = entity->sched_data;
- struct bfq_service_tree *st = bfq_entity_service_tree(entity);
- int is_in_service = entity == sd->in_service_entity;
+ struct bfq_service_tree *st;
+ bool is_in_service;
if (!entity->on_st) /* entity never activated, or already inactive */
return false;
+ /*
+ * If we get here, then entity is active, which implies that
+ * bfq_group_set_parent has already been invoked for the group
+ * represented by entity. Therefore, the field
+ * entity->sched_data has been set, and we can safely use it.
+ */
+ st = bfq_entity_service_tree(entity);
+ is_in_service = entity == sd->in_service_entity;
+
if (is_in_service)
bfq_calc_finish(entity, entity->service);
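
The point of deferring the two initializers is spelled out in the new comment: for a group entity, the state behind entity->sched_data is set up by bfq_group_set_parent(), which is only guaranteed to have run once the entity has been activated. The old code dereferenced that state before the on_st check. Schematically (hypothetical names, not the kernel code itself):

/* Check-then-compute: nothing that depends on late-initialized
 * parent linkage is touched until the entity is known active. */
if (!e->on_tree)		/* never activated: linkage may be unset */
	return false;

st = service_tree_of(e);	/* safe: activation implies the linkage */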
diff --git a/block/blk-core.c b/block/blk-core.c
index c580b0138a7f..c7068520794b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2644,8 +2644,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
return false;
}
- WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
-
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
@@ -2658,17 +2656,19 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
- /*
- * If total number of sectors is less than the first segment
- * size, something has gone terribly wrong.
- */
- if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
- blk_dump_rq_flags(req, "request botched");
- req->__data_len = blk_rq_cur_bytes(req);
- }
+ if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
+ /*
+ * If total number of sectors is less than the first segment
+ * size, something has gone terribly wrong.
+ */
+ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+ blk_dump_rq_flags(req, "request botched");
+ req->__data_len = blk_rq_cur_bytes(req);
+ }
- /* recalculate the number of segments */
- blk_recalc_rq_segments(req);
+ /* recalculate the number of segments */
+ blk_recalc_rq_segments(req);
+ }
return true;
}
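
Context for the blk_update_request() change: requests flagged RQF_SPECIAL_PAYLOAD carry a driver-built payload in rq->special_vec rather than data mapped from the bios, so comparing blk_rq_bytes() against the first segment and recalculating segments are meaningless for them; the WARN_ON_ONCE they used to trip is replaced by simply skipping that work. A hedged sketch of how a driver attaches such a payload, loosely modeled on NVMe's discard setup (abridged, not the verbatim driver code):

/* The driver supplies its own single-segment payload, so the block
 * core must not rebuild the segment count from rq->bio. */
range = kmalloc(sizeof(*range), GFP_ATOMIC);
if (!range)
	return BLK_MQ_RQ_QUEUE_BUSY;

rq->special_vec.bv_page   = virt_to_page(range);
rq->special_vec.bv_offset = offset_in_page(range);
rq->special_vec.bv_len    = sizeof(*range);
rq->rq_flags             |= RQF_SPECIAL_PAYLOAD;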
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5d4ce7eb8dbf..a69ad122ed66 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1236,7 +1236,7 @@ void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
-void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -1554,13 +1554,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio, q->bio_split);
-
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
@@ -2341,15 +2341,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
- mutex_lock(&all_q_mutex);
get_online_cpus();
+ mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q, cpu_online_mask);
- put_online_cpus();
mutex_unlock(&all_q_mutex);
+ put_online_cpus();
if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
int ret;
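
Two independent fixes in blk-mq here. First, blk_queue_split() now runs before bio_integrity_prep(), so the integrity payload is generated for the bio that is actually issued rather than for the original, possibly oversized one. Second, all_q_mutex is now taken inside get_online_cpus(), matching the order used on the CPU-hotplug path; two paths taking the same pair of locks in opposite orders is the classic ABBA deadlock. An illustrative sketch, not the exact kernel call chains:

/*
 *   path 1 (old order)          path 2 (hotplug path)
 *   mutex_lock(&all_q_mutex);   get_online_cpus();
 *   get_online_cpus();          mutex_lock(&all_q_mutex);
 *       waits on hotplug lock       waits on all_q_mutex
 *
 * Each side holds what the other needs. The fix makes both paths
 * take the hotplug read lock first:
 */
get_online_cpus();		/* outermost */
mutex_lock(&all_q_mutex);	/* then the queue-list mutex */
/* ... update per-CPU software queue mappings ... */
mutex_unlock(&all_q_mutex);
put_online_cpus();		/* released in reverse order */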
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 6c2f40940439..c52356d90fe3 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -96,13 +96,16 @@ void blk_stat_add(struct request *rq)
rcu_read_lock();
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
- if (blk_stat_is_active(cb)) {
- bucket = cb->bucket_fn(rq);
- if (bucket < 0)
- continue;
- stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
- __blk_stat_add(stat, value);
- }
+ if (!blk_stat_is_active(cb))
+ continue;
+
+ bucket = cb->bucket_fn(rq);
+ if (bucket < 0)
+ continue;
+
+ stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
+ __blk_stat_add(stat, value);
+ put_cpu_ptr(cb->cpu_stat);
}
rcu_read_unlock();
}
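
The blk-stat fix is about preemption, not RCU: this_cpu_ptr() is only legal with preemption already disabled, but blk_stat_add() can be called from preemptible context (rcu_read_lock() does not disable preemption on preemptible-RCU kernels), where the task could migrate to another CPU between resolving the per-CPU pointer and writing through it. get_cpu_ptr()/put_cpu_ptr() bracket the access with preempt_disable()/preempt_enable(). The rule in miniature (hypothetical names):

#include <linux/percpu.h>

struct my_stat __percpu *counters;	/* hypothetical per-CPU array */
struct my_stat *s;

s = get_cpu_ptr(counters);	/* preempt_disable() + this-CPU pointer */
s->events++;			/* cannot migrate until the put */
put_cpu_ptr(counters);		/* preempt_enable() */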
diff --git a/block/elevator.c b/block/elevator.c
index ab726a5c0bf6..dac99fbfc273 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1062,10 +1062,8 @@ static int __elevator_change(struct request_queue *q, const char *name)
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(strstrip(elevator_name), true);
- if (!e) {
- printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
+ if (!e)
return -EINVAL;
- }
if (q->elevator &&
!strcmp(elevator_name, q->elevator->type->elevator_name)) {
@@ -1105,7 +1103,6 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
if (!ret)
return count;
- printk(KERN_ERR "elevator: switch to %s failed\n", name);
return ret;
}
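
The elevator.c hunks only remove log noise: both failures are already reported to the caller through the return value (-EINVAL from __elevator_change(), the error code from elv_iosched_store()), and anyone able to write the sysfs attribute could otherwise flood dmesg at will. A hedged user-space check of the new behavior ("sda" is a placeholder device):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/block/sda/queue/scheduler", O_WRONLY);

	if (fd < 0)
		return 1;
	/* An unknown elevator name now fails with EINVAL and no printk. */
	if (write(fd, "no-such-iosched", 15) < 0)
		fprintf(stderr, "switch failed: %s\n", strerror(errno));
	close(fd);
	return 0;
}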