commit e77bc7dc9af0ec53996367b2053dfafee83b7edb
tree   7850cb0cc9e0d7308992b2b983052c5f209245bd /drivers/block/null_blk_zoned.c
parent 105856b36c0cefc2fa1c1e649d75da71e2e38c31
parent 82514ecd61435c2d47c235e1343872b38db17be4
author    Jiri Kosina <jkosina@suse.cz>  2020-12-16 13:41:05 +0300
committer Jiri Kosina <jkosina@suse.cz>  2020-12-16 13:41:05 +0300
Merge branch 'for-5.11/elecom' into for-linus
- support for EX-G M-XGL20DLBK device, from YOSHIOKA Takuma
Diffstat (limited to 'drivers/block/null_blk_zoned.c')
-rw-r--r--  drivers/block/null_blk_zoned.c  | 205
1 file changed, 156 insertions(+), 49 deletions(-)
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index fa0cc70f05e6..beb34b4f76b0 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include "null_blk.h"
#define CREATE_TRACE_POINTS
@@ -45,6 +46,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
if (!dev->zones)
return -ENOMEM;
+ /*
+ * With memory backing, the zone_lock spinlock needs to be temporarily
+ * released to avoid scheduling in atomic context. To guarantee zone
+ * information protection, use a bitmap to lock zones with
+ * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+ * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+ */
+ spin_lock_init(&dev->zone_lock);
+ if (dev->memory_backed) {
+ dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+ if (!dev->zone_locks) {
+ kvfree(dev->zones);
+ return -ENOMEM;
+ }
+ }
+
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
pr_info("changed the number of conventional zones to %u",
	dev->zone_nr_conv);
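
The hunk above sets up a two-level locking scheme: one spinlock shared by all zones, plus an optional bitmap carrying one sleepable lock bit per zone when memory backing is enabled. A minimal sketch of that initialization, using a hypothetical stand-in struct whose field names are assumed to mirror the nullb_device members touched here:

	#include <linux/bitmap.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct zdev {                      /* hypothetical stand-in for nullb_device */
		unsigned int nr_zones;
		bool memory_backed;
		spinlock_t zone_lock;      /* fast path, never held across a sleep */
		unsigned long *zone_locks; /* one bit per zone, memory backing only */
	};

	static int zdev_init_locks(struct zdev *dev)
	{
		spin_lock_init(&dev->zone_lock);
		if (dev->memory_backed) {
			/* bitmap_zalloc() returns a zeroed bitmap of nr_zones bits */
			dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
			if (!dev->zone_locks)
				return -ENOMEM;
		}
		return 0;
	}
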
@@ -123,15 +140,31 @@ int null_register_zoned_dev(struct nullb *nullb)
void null_free_zoned_dev(struct nullb_device *dev)
{
+ bitmap_free(dev->zone_locks);
kvfree(dev->zones);
}
+static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ if (dev->memory_backed)
+ wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+ spin_lock_irq(&dev->zone_lock);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ spin_unlock_irq(&dev->zone_lock);
+
+ if (dev->memory_backed)
+ clear_and_wake_up_bit(zno, dev->zone_locks);
+}
+
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int first_zone, i;
+ unsigned int first_zone, i, zno;
struct blk_zone zone;
int error;
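
In isolation, the two helpers added above pair a sleepable per-zone bit lock (wait_on_bit_lock_io() and clear_and_wake_up_bit() are the real kernel primitives) around the shared spinlock. A sketch over the zdev stand-in from the previous example; zone_lock()/zone_unlock() are assumed names, not the driver's:

	#include <linux/sched.h>
	#include <linux/wait_bit.h>

	static void zone_lock(struct zdev *dev, unsigned int zno)
	{
		/* May sleep; safe because memory backing implies BLK_MQ_F_BLOCKING */
		if (dev->memory_backed)
			wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&dev->zone_lock);
	}

	static void zone_unlock(struct zdev *dev, unsigned int zno)
	{
		/* Release in reverse order: spinlock first, then the zone bit */
		spin_unlock_irq(&dev->zone_lock);
		if (dev->memory_backed)
			clear_and_wake_up_bit(zno, dev->zone_locks);
	}

Taking the bit before the spinlock, and releasing in reverse, is what later allows the spinlock alone to be dropped across a sleeping call while the zone stays owned.
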
@@ -142,15 +175,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
trace_nullb_report_zones(nullb, nr_zones);
- for (i = 0; i < nr_zones; i++) {
+ zno = first_zone;
+ for (i = 0; i < nr_zones; i++, zno++) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback.
* So use a local copy to avoid corruption of the device zone
* array.
*/
- memcpy(&zone, &dev->zones[first_zone + i],
- sizeof(struct blk_zone));
+ null_lock_zone(dev, zno);
+ memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
+ null_unlock_zone(dev, zno);
+
error = cb(&zone, i, data);
if (error)
return error;
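
The memcpy() into a stack copy matters because the callback is allowed to edit the zone it is handed. A hypothetical callback of the kind the comment describes, loosely modeled on how a stacked target might remap zones by a fixed sector offset:

	#include <linux/blkdev.h>

	static int remap_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
	{
		sector_t off = *(sector_t *)data;  /* hypothetical remap offset */

		/* Editing this copy is safe; the device zone array is untouched. */
		zone->start -= off;
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else
			zone->wp -= off;
		return 0;
	}
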
@@ -159,6 +195,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
return nr_zones;
}
+/*
+ * This is called in the case of memory backing from null_process_cmd()
+ * with the target zone already locked.
+ */
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
@@ -220,29 +260,34 @@ static void null_close_first_imp_zone(struct nullb_device *dev)
}
}
-static bool null_can_set_active(struct nullb_device *dev)
+static blk_status_t null_check_active(struct nullb_device *dev)
{
if (!dev->zone_max_active)
- return true;
+ return BLK_STS_OK;
- return dev->nr_zones_exp_open + dev->nr_zones_imp_open +
- dev->nr_zones_closed < dev->zone_max_active;
+ if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
+ dev->nr_zones_closed < dev->zone_max_active)
+ return BLK_STS_OK;
+
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
}
-static bool null_can_open(struct nullb_device *dev)
+static blk_status_t null_check_open(struct nullb_device *dev)
{
if (!dev->zone_max_open)
- return true;
+ return BLK_STS_OK;
if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
- return true;
+ return BLK_STS_OK;
- if (dev->nr_zones_imp_open && null_can_set_active(dev)) {
- null_close_first_imp_zone(dev);
- return true;
+ if (dev->nr_zones_imp_open) {
+ if (null_check_active(dev) == BLK_STS_OK) {
+ null_close_first_imp_zone(dev);
+ return BLK_STS_OK;
+ }
}
- return false;
+ return BLK_STS_ZONE_OPEN_RESOURCE;
}
/*
@@ -258,19 +303,22 @@ static bool null_can_open(struct nullb_device *dev)
* it is not certain that closing an implicit open zone will allow a new zone
* to be opened, since we might already be at the active limit capacity.
*/
-static bool null_has_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
+static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
{
+ blk_status_t ret;
+
switch (zone->cond) {
case BLK_ZONE_COND_EMPTY:
- if (!null_can_set_active(dev))
- return false;
+ ret = null_check_active(dev);
+ if (ret != BLK_STS_OK)
+ return ret;
fallthrough;
case BLK_ZONE_COND_CLOSED:
- return null_can_open(dev);
+ return null_check_open(dev);
default:
/* Should never be called for other states */
WARN_ON(1);
- return false;
+ return BLK_STS_IOERR;
}
}
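
Switching from bool to blk_status_t lets callers distinguish a transient shortage of open or active zones (BLK_STS_ZONE_OPEN_RESOURCE, BLK_STS_ZONE_ACTIVE_RESOURCE) from a hard BLK_STS_IOERR. A condensed sketch of the two checks, with assumed counter names standing in for the dev->nr_zones_* fields:

	#include <linux/blk_types.h>

	struct zcounts {                           /* hypothetical counters */
		unsigned int exp_open, imp_open, closed;
		unsigned int max_open, max_active; /* 0 means unlimited */
	};

	static blk_status_t check_active(const struct zcounts *c)
	{
		if (!c->max_active ||
		    c->exp_open + c->imp_open + c->closed < c->max_active)
			return BLK_STS_OK;
		return BLK_STS_ZONE_ACTIVE_RESOURCE;  /* retryable, not fatal */
	}

	static blk_status_t check_open(const struct zcounts *c)
	{
		if (!c->max_open || c->exp_open + c->imp_open < c->max_open)
			return BLK_STS_OK;
		/*
		 * An implicitly open zone could be closed to make room, but
		 * only when that would not breach the active limit too.
		 */
		if (c->imp_open && check_active(c) == BLK_STS_OK)
			return BLK_STS_OK;  /* caller closes one imp-open zone */
		return BLK_STS_ZONE_OPEN_RESOURCE;
	}
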
@@ -287,21 +335,26 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* Cannot write to a full zone */
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_CLOSED:
- if (!null_has_zone_resources(dev, zone))
- return BLK_STS_IOERR;
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ goto unlock;
break;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
break;
default:
/* Invalid zone condition */
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
}
/*
@@ -317,11 +370,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
else
cmd->rq->__sector = sector;
} else if (sector != zone->wp) {
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
}
- if (zone->wp + nr_sectors > zone->start + zone->capacity)
- return BLK_STS_IOERR;
+ if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
if (zone->cond == BLK_ZONE_COND_CLOSED) {
dev->nr_zones_closed--;
@@ -332,9 +388,19 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ /*
+ * Memory backing allocation may sleep: release the zone_lock spinlock
+ * to avoid scheduling in atomic context. Zone operation atomicity is
+ * still guaranteed through the zone_locks bitmap.
+ */
+ if (dev->memory_backed)
+ spin_unlock_irq(&dev->zone_lock);
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ if (dev->memory_backed)
+ spin_lock_irq(&dev->zone_lock);
+
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
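
This unlock/relock pair is the point of the whole patch: only the spinlock is dropped around the potentially sleeping data copy, while the zone's bit lock, still held, keeps other requests off this zone. The shape of it, sketched with a hypothetical sleeping helper:

	static blk_status_t copy_to_backing_store(void)
	{
		return BLK_STS_OK;  /* stand-in for the real allocation + copy */
	}

	static blk_status_t zone_do_io(struct zdev *dev, unsigned int zno)
	{
		blk_status_t ret;

		/* On entry (via zone_lock()): bit lock and spinlock both held. */
		if (dev->memory_backed)
			spin_unlock_irq(&dev->zone_lock);

		ret = copy_to_backing_store();  /* may allocate, may sleep */

		if (dev->memory_backed)
			spin_lock_irq(&dev->zone_lock);

		/*
		 * Touching zone state is still safe: the zone_locks bit was
		 * never released, so no other request raced us on this zone.
		 */
		return ret;
	}
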
@@ -344,11 +410,18 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
dev->nr_zones_imp_open--;
zone->cond = BLK_ZONE_COND_FULL;
}
- return BLK_STS_OK;
+ ret = BLK_STS_OK;
+
+unlock:
+ null_unlock_zone(dev, zno);
+
+ return ret;
}
static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
{
+ blk_status_t ret;
+
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
@@ -357,15 +430,17 @@ static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zo
/* open operation on exp open is not an error */
return BLK_STS_OK;
case BLK_ZONE_COND_EMPTY:
- if (!null_has_zone_resources(dev, zone))
- return BLK_STS_IOERR;
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
break;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
break;
case BLK_ZONE_COND_CLOSED:
- if (!null_has_zone_resources(dev, zone))
- return BLK_STS_IOERR;
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
dev->nr_zones_closed--;
break;
case BLK_ZONE_COND_FULL:
@@ -381,6 +456,8 @@ static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zo
static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
{
+ blk_status_t ret;
+
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
@@ -389,8 +466,9 @@ static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *
/* finish operation on full is not an error */
return BLK_STS_OK;
case BLK_ZONE_COND_EMPTY:
- if (!null_has_zone_resources(dev, zone))
- return BLK_STS_IOERR;
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
break;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
@@ -399,8 +477,9 @@ static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *
dev->nr_zones_exp_open--;
break;
case BLK_ZONE_COND_CLOSED:
- if (!null_has_zone_resources(dev, zone))
- return BLK_STS_IOERR;
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
dev->nr_zones_closed--;
break;
default:
@@ -447,16 +526,30 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zone_no = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zone_no];
- blk_status_t ret = BLK_STS_OK;
+ unsigned int zone_no;
+ struct blk_zone *zone;
+ blk_status_t ret;
size_t i;
+ if (op == REQ_OP_ZONE_RESET_ALL) {
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+ null_lock_zone(dev, i);
+ zone = &dev->zones[i];
+ if (zone->cond != BLK_ZONE_COND_EMPTY) {
+ null_reset_zone(dev, zone);
+ trace_nullb_zone_op(cmd, i, zone->cond);
+ }
+ null_unlock_zone(dev, i);
+ }
+ return BLK_STS_OK;
+ }
+
+ zone_no = null_zone_no(dev, sector);
+ zone = &dev->zones[zone_no];
+
+ null_lock_zone(dev, zone_no);
+
switch (op) {
- case REQ_OP_ZONE_RESET_ALL:
- for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
- null_reset_zone(dev, &dev->zones[i]);
- break;
case REQ_OP_ZONE_RESET:
ret = null_reset_zone(dev, zone);
break;
@@ -470,30 +563,44 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
ret = null_finish_zone(dev, zone);
break;
default:
- return BLK_STS_NOTSUPP;
+ ret = BLK_STS_NOTSUPP;
+ break;
}
if (ret == BLK_STS_OK)
trace_nullb_zone_op(cmd, zone_no, zone->cond);
+ null_unlock_zone(dev, zone_no);
+
return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector, sector_t nr_sectors)
{
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int zno = null_zone_no(dev, sector);
+ blk_status_t sts;
+
switch (op) {
case REQ_OP_WRITE:
- return null_zone_write(cmd, sector, nr_sectors, false);
+ sts = null_zone_write(cmd, sector, nr_sectors, false);
+ break;
case REQ_OP_ZONE_APPEND:
- return null_zone_write(cmd, sector, nr_sectors, true);
+ sts = null_zone_write(cmd, sector, nr_sectors, true);
+ break;
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- return null_zone_mgmt(cmd, op, sector);
+ sts = null_zone_mgmt(cmd, op, sector);
+ break;
default:
- return null_process_cmd(cmd, op, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+ sts = null_process_cmd(cmd, op, sector, nr_sectors);
+ null_unlock_zone(dev, zno);
}
+
+ return sts;
}
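
Stepping back: with per-zone locks, REQ_OP_ZONE_RESET_ALL becomes its own early path that walks every sequential zone, locking each in turn, so the device is never frozen as a whole. Sketched with the helpers from the earlier examples; note the real driver also fixes up the nr_zones_* accounting inside null_reset_zone():

	static void reset_all_zones(struct zdev *dev, struct blk_zone *zones,
				    unsigned int nr_conv)
	{
		unsigned int i;

		/* Conventional zones have no write pointer; start past them. */
		for (i = nr_conv; i < dev->nr_zones; i++) {
			zone_lock(dev, i);
			if (zones[i].cond != BLK_ZONE_COND_EMPTY) {
				zones[i].cond = BLK_ZONE_COND_EMPTY;
				zones[i].wp = zones[i].start;
			}
			zone_unlock(dev, i);
		}
	}
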