Diffstat (limited to 'block/blk-sysfs.c')
 block/blk-sysfs.c | 80
 1 file changed, 48 insertions(+), 32 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e85941bec857..d80a202cd170 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -23,14 +23,14 @@
struct queue_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct gendisk *disk, char *page);
- int (*load_module)(struct gendisk *disk, const char *page, size_t count);
ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
+ void (*load_module)(struct gendisk *disk, const char *page, size_t count);
};
static ssize_t
queue_var_show(unsigned long var, char *page)
{
- return sprintf(page, "%lu\n", var);
+ return sysfs_emit(page, "%lu\n", var);
}
static ssize_t
@@ -121,7 +121,7 @@ QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)
#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field) \
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
{ \
- return sprintf(page, "%llu\n", \
+ return sysfs_emit(page, "%llu\n", \
(unsigned long long)disk->queue->limits._field << \
SECTOR_SHIFT); \
}
@@ -131,6 +131,7 @@ QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field) \
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
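With the dedicated queue_zone_append_max_show() helper gone (see the removal further down), the zone append limit is reported through the same byte-conversion macro as the other limits. As a sketch, assuming limits.max_zone_append_sectors now holds the final, pre-calculated value, the invocation added above expands to roughly:

static ssize_t queue_max_zone_append_sectors_show(struct gendisk *disk,
                                                  char *page)
{
        /* sysfs_emit() bounds the output to PAGE_SIZE, unlike raw sprintf() */
        return sysfs_emit(page, "%llu\n",
                (unsigned long long)disk->queue->limits.max_zone_append_sectors <<
                        SECTOR_SHIFT);
}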
@@ -144,7 +145,7 @@ QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
#define QUEUE_SYSFS_SHOW_CONST(_name, _val) \
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
{ \
- return sprintf(page, "%d\n", _val); \
+ return sysfs_emit(page, "%d\n", _val); \
}
/* deprecated fields */
@@ -178,18 +179,6 @@ static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
return ret;
}
-/*
- * For zone append queue_max_zone_append_sectors does not just return the
- * underlying queue limits, but actually contains a calculation. Because of
- * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
- */
-static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
-{
- return sprintf(page, "%llu\n",
- (u64)queue_max_zone_append_sectors(disk->queue) <<
- SECTOR_SHIFT);
-}
-
static ssize_t
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
{
@@ -235,7 +224,7 @@ static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
#define QUEUE_SYSFS_FEATURE(_name, _feature) \
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
{ \
- return sprintf(page, "%u\n", \
+ return sysfs_emit(page, "%u\n", \
!!(disk->queue->limits.features & _feature)); \
} \
static ssize_t queue_##_name##_store(struct gendisk *disk, \
@@ -252,7 +241,7 @@ QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature) \
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
{ \
- return sprintf(page, "%u\n", \
+ return sysfs_emit(page, "%u\n", \
!!(disk->queue->limits.features & _feature)); \
}
@@ -263,8 +252,8 @@ QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
if (blk_queue_is_zoned(disk->queue))
- return sprintf(page, "host-managed\n");
- return sprintf(page, "none\n");
+ return sysfs_emit(page, "host-managed\n");
+ return sysfs_emit(page, "none\n");
}
static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
@@ -272,6 +261,34 @@ static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
return queue_var_show(disk_nr_zones(disk), page);
}
+static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
+{
+ return queue_var_show(blk_queue_passthrough_stat(disk->queue), page);
+}
+
+static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
+ const char *page, size_t count)
+{
+ struct queue_limits lim;
+ unsigned long ios;
+ ssize_t ret;
+
+ ret = queue_var_store(&ios, page, count);
+ if (ret < 0)
+ return ret;
+
+ lim = queue_limits_start_update(disk->queue);
+ if (ios)
+ lim.flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
+ else
+ lim.flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
+
+ ret = queue_limits_commit_update(disk->queue, &lim);
+ if (ret)
+ return ret;
+
+ return count;
+}
static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
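The new iostats_passthrough attribute follows the usual limits-update pattern: snapshot the limits with queue_limits_start_update(), edit the copy, then let queue_limits_commit_update() validate and publish it, so a rejected write never leaves a half-applied flag. A minimal sketch of the same pattern from a driver-style caller, assuming a hypothetical helper name:

static int example_set_iostats_passthrough(struct request_queue *q, bool enable)
{
        struct queue_limits lim;

        /* takes the limits lock and returns a private copy to edit */
        lim = queue_limits_start_update(q);
        if (enable)
                lim.flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
        else
                lim.flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
        /* validates the copy and atomically replaces q->limits */
        return queue_limits_commit_update(q, &lim);
}

From user space the attribute is written as a boolean, e.g. echo 1 > /sys/block/<disk>/queue/iostats_passthrough.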
@@ -349,7 +366,7 @@ static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
- return sprintf(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
+ return sysfs_emit(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}
static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
@@ -370,8 +387,8 @@ static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
if (blk_queue_write_cache(disk->queue))
- return sprintf(page, "write back\n");
- return sprintf(page, "write through\n");
+ return sysfs_emit(page, "write back\n");
+ return sysfs_emit(page, "write through\n");
}
static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
@@ -451,7 +468,7 @@ QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
+QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
QUEUE_RO_ENTRY(queue_zoned, "zoned");
@@ -460,6 +477,7 @@ QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
@@ -501,9 +519,9 @@ static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
return -EINVAL;
if (wbt_disabled(disk->queue))
- return sprintf(page, "0\n");
+ return sysfs_emit(page, "0\n");
- return sprintf(page, "%llu\n",
+ return sysfs_emit(page, "%llu\n",
div_u64(wbt_get_min_lat(disk->queue), 1000));
}
@@ -578,7 +596,7 @@ static struct attribute *queue_attrs[] = {
&queue_atomic_write_unit_max_entry.attr,
&queue_write_same_max_entry.attr,
&queue_max_write_zeroes_sectors_entry.attr,
- &queue_zone_append_max_entry.attr,
+ &queue_max_zone_append_sectors_entry.attr,
&queue_zone_write_granularity_entry.attr,
&queue_rotational_entry.attr,
&queue_zoned_entry.attr,
@@ -586,6 +604,7 @@ static struct attribute *queue_attrs[] = {
&queue_max_open_zones_entry.attr,
&queue_max_active_zones_entry.attr,
&queue_nomerges_entry.attr,
+ &queue_iostats_passthrough_entry.attr,
&queue_iostats_entry.attr,
&queue_stable_writes_entry.attr,
&queue_add_random_entry.attr,
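For context, queue_attrs[] is a plain NULL-terminated attribute array; arrays like this are exposed by wrapping them in an attribute_group and registering the group on a kobject. A simplified, hypothetical sketch (the real registration in blk-sysfs.c is more involved):

/* Hypothetical example: expose an attribute array as a sysfs group. */
static const struct attribute_group example_attr_group = {
        .attrs  = queue_attrs,
};

static int example_register(struct kobject *kobj)
{
        return sysfs_create_group(kobj, &example_attr_group);
}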
@@ -684,11 +703,8 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
* queue to ensure that the module file can be read when the request
* queue is the one for the device storing the module file.
*/
- if (entry->load_module) {
- res = entry->load_module(disk, page, length);
- if (res)
- return res;
- }
+ if (entry->load_module)
+ entry->load_module(disk, page, length);
blk_mq_freeze_queue(q);
mutex_lock(&q->sysfs_lock);
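With the signature change at the top of the file, ->load_module() no longer returns an error: queue_attr_store() now calls it purely as a best-effort step before freezing the queue, and any real failure is reported by the subsequent ->store() call. A sketch of a callback matching the new void signature, assuming a hypothetical module-backed attribute (the in-tree user is the I/O scheduler attribute):

static void example_load_module(struct gendisk *disk, const char *page,
                                size_t count)
{
        char name[32];

        /* sysfs writes are '\n'-terminated; copy and trim before use */
        if (strscpy(name, page, sizeof(name)) < 0)
                return;
        strim(name);

        /* best effort only: the later ->store() reports the real error */
        request_module("%s-iosched", name);
}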