author     Martin K. Petersen <martin.petersen@oracle.com>  2023-05-22 23:35:02 +0300
committer  Martin K. Petersen <martin.petersen@oracle.com>  2023-05-22 23:35:02 +0300
commit     7907ad748bdba8ac9ca47f0a650cc2e5d2ad6e24 (patch)
tree       068ffd5248c8c988015fc751fe8b68dd51347943 /drivers/target
parent     16853cd8f6d44d774f683d670be38c7d91eb32b8 (diff)
parent     394f811848827ad23d2b43e94e5d72a24cfbc39f (diff)
Merge patch series "Use block pr_ops in LIO"
Mike Christie <michael.christie@oracle.com> says:

The patches in this thread allow us to use the block pr_ops with LIO's target_core_iblock module to support cluster applications in VMs. They were built against Linus's tree and also apply to linux-next, Martin's tree, and Jens's trees.

Currently, to use Windows clustering or Linux clustering (Pacemaker + ClusterLabs SCSI fence agents) in VMs with LIO and vhost-scsi, you have to use tcmu or pscsi, or use a cluster-aware FS/framework for the LIO PR file. Setting up a cluster FS/framework is a pain and a waste when your real backend device is already a distributed device. pscsi and tcmu are nice for specific use cases, but iblock gives you the best performance and allows you to use stacked devices like dm-multipath. These patches therefore allow iblock to work like pscsi/tcmu in that a PR command can be passed to the backend module; iblock then uses the block layer pr_ops to pass the PR command to the real device, similar to what we do for unmap today.

The patches are separated into the following groups:

Patch 1 - 2: Add block layer callouts for reading reservations and rename the reservation error code.
Patch 3 - 5: SCSI support for the new callouts.
Patch 6: DM support for the new callouts.
Patch 7 - 13: NVMe support for the new callouts.
Patch 14 - 18: LIO support for the new callouts.

This patchset has been tested with the libiscsi PGR ops and with the Windows failover cluster verification test.

Note that for SCSI backend devices we need this patchset to handle UAs:

https://lore.kernel.org/linux-scsi/20230123221046.125483-1-michael.christie@oracle.com/T/#m4834a643ffb5bac2529d65d40906d3cfbdd9b1b7

That work is being done separately to keep this patchset smaller and easier to review. The two patchsets have no conflicts, so they can be merged through different trees.

Link: https://lore.kernel.org/r/20230407200551.12660-1-michael.christie@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
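As a rough illustration of the dispatch pattern this series adds, the standalone C sketch below models it: when a device is flagged for PR passthrough and the backend supplies a PR OUT callout, the command is routed to the backend instead of the in-core emulation. The names here (pr_backend_ops, handle_pr_out, FLAG_PASSTHROUGH_PGR, demo_execute_pr_out) are hypothetical stand-ins, not the kernel's interfaces; the real ones are struct exec_cmd_ops, TRANSPORT_FLAG_PASSTHROUGH_PGR, and the block layer pr_ops shown in the diff below.

/*
 * Illustrative sketch only (not kernel code).  It models the passthrough
 * path added by this series: if the device is flagged for PR passthrough
 * and the backend supplies a PR OUT callout, dispatch to the backend;
 * otherwise fall back to the existing in-core emulation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for struct exec_cmd_ops / transport flags */
struct pr_backend_ops {
	int (*execute_pr_out)(uint8_t sa, uint64_t key, uint64_t sa_key,
			      uint8_t type, bool aptpl);
};

#define FLAG_PASSTHROUGH_PGR 0x1

static int emulate_pr_out(uint8_t sa)
{
	printf("emulating PR OUT service action 0x%02x in the target core\n",
	       (unsigned int)sa);
	return 0;
}

static int handle_pr_out(unsigned int transport_flags,
			 const struct pr_backend_ops *ops, uint8_t sa,
			 uint64_t key, uint64_t sa_key, uint8_t type,
			 bool aptpl)
{
	if (transport_flags & FLAG_PASSTHROUGH_PGR) {
		if (!ops || !ops->execute_pr_out)
			return -1;	/* backend cannot do passthrough */
		return ops->execute_pr_out(sa, key, sa_key, type, aptpl);
	}
	return emulate_pr_out(sa);	/* legacy emulated path */
}

/* demo backend callout: would translate the SA into a block pr_ops call */
static int demo_execute_pr_out(uint8_t sa, uint64_t key, uint64_t sa_key,
			       uint8_t type, bool aptpl)
{
	printf("passing PR OUT 0x%02x (key 0x%llx) down via block pr_ops\n",
	       (unsigned int)sa, (unsigned long long)key);
	(void)sa_key; (void)type; (void)aptpl;
	return 0;
}

int main(void)
{
	struct pr_backend_ops ops = { .execute_pr_out = demo_execute_pr_out };

	/* PRO_REGISTER (0x00) routed through the backend callout */
	handle_pr_out(FLAG_PASSTHROUGH_PGR, &ops, 0x00, 0xabc, 0, 0, true);
	/* same command on a device without the passthrough flag */
	handle_pr_out(0, NULL, 0x00, 0xabc, 0, 0, true);
	return 0;
}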
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/target_core_file.c     4
-rw-r--r--  drivers/target/target_core_iblock.c   275
-rw-r--r--  drivers/target/target_core_pr.c       79
-rw-r--r--  drivers/target/target_core_rd.c       4
-rw-r--r--  drivers/target/target_core_sbc.c      13
-rw-r--r--  drivers/target/target_core_spc.c      113
6 files changed, 436 insertions, 52 deletions
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index ce0e000b74fc..4d447520bab8 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -896,7 +896,7 @@ static void fd_free_prot(struct se_device *dev)
fd_dev->fd_prot_file = NULL;
}
-static struct sbc_ops fd_sbc_ops = {
+static struct exec_cmd_ops fd_exec_cmd_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
.execute_write_same = fd_execute_write_same,
@@ -906,7 +906,7 @@ static struct sbc_ops fd_sbc_ops = {
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &fd_sbc_ops);
+ return sbc_parse_cdb(cmd, &fd_exec_cmd_ops);
}
static const struct target_backend_ops fileio_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index cc838ffd1294..e6029ea87e2f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -23,13 +23,16 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/pr.h>
#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
+#include "target_core_pr.h"
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
@@ -310,7 +313,7 @@ static sector_t iblock_get_blocks(struct se_device *dev)
return blocks_long;
}
-static void iblock_complete_cmd(struct se_cmd *cmd)
+static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
{
struct iblock_req *ibr = cmd->priv;
u8 status;
@@ -318,7 +321,9 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
if (!refcount_dec_and_test(&ibr->pending))
return;
- if (atomic_read(&ibr->ib_bio_err_cnt))
+ if (blk_status == BLK_STS_RESV_CONFLICT)
+ status = SAM_STAT_RESERVATION_CONFLICT;
+ else if (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION;
else
status = SAM_STAT_GOOD;
@@ -331,6 +336,7 @@ static void iblock_bio_done(struct bio *bio)
{
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
+ blk_status_t blk_status = bio->bi_status;
if (bio->bi_status) {
pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
@@ -343,7 +349,7 @@ static void iblock_bio_done(struct bio *bio)
bio_put(bio);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, blk_status);
}
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
@@ -759,7 +765,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!sgl_nents) {
refcount_set(&ibr->pending, 1);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, BLK_STS_OK);
return 0;
}
@@ -817,7 +823,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
iblock_submit_bios(&list);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, BLK_STS_OK);
return 0;
fail_put_bios:
@@ -829,6 +835,258 @@ fail:
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
+ u64 sa_key, u8 type, bool aptpl)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int ret;
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ switch (sa) {
+ case PRO_REGISTER:
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ if (!ops->pr_register) {
+ pr_err("block device does not support pr_register.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /* The block layer pr ops always enables aptpl */
+ if (!aptpl)
+ pr_info("APTPL not set by initiator, but will be used.\n");
+
+ ret = ops->pr_register(bdev, key, sa_key,
+ sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
+ break;
+ case PRO_RESERVE:
+ if (!ops->pr_reserve) {
+ pr_err("block_device does not support pr_reserve.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
+ break;
+ case PRO_CLEAR:
+ if (!ops->pr_clear) {
+ pr_err("block_device does not support pr_clear.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_clear(bdev, key);
+ break;
+ case PRO_PREEMPT:
+ case PRO_PREEMPT_AND_ABORT:
+ if (!ops->pr_preempt) {
+ pr_err("block_device does not support pr_preempt.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_preempt(bdev, key, sa_key,
+ scsi_pr_type_to_block(type),
+ sa == PRO_PREEMPT ? false : true);
+ break;
+ case PRO_RELEASE:
+ if (!ops->pr_release) {
+ pr_err("block_device does not support pr_release.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ret)
+ return TCM_NO_SENSE;
+ else if (ret == PR_STS_RESERVATION_CONFLICT)
+ return TCM_RESERVATION_CONFLICT;
+ else
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+static void iblock_pr_report_caps(unsigned char *param_data)
+{
+ u16 len = 8;
+
+ put_unaligned_be16(len, &param_data[0]);
+ /*
+ * When using the pr_ops passthrough method we only support exporting
+ * the device through one target port because from the backend module
+ * level we can't see the target port config. As a result we only
+ * support registration directly from the I_T nexus the cmd is sent
+ * through and do not set ATP_C here.
+ *
+ * The block layer pr_ops do not support passing in initiators so
+ * we don't set SIP_C here.
+ */
+ /* PTPL_C: Persistence across Target Power Loss bit */
+ param_data[2] |= 0x01;
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
+ */
+ param_data[3] |= 0x80;
+ /*
+ * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+ */
+ param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+ /*
+ * PTPL_A: Persistence across Target Power Loss Active bit. The block
+ * layer pr ops always enables this so report it active.
+ */
+ param_data[3] |= 0x01;
+ /*
+ * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
+ */
+ param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+ param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+ param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+ param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+}
+
+static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
+ unsigned char *param_data)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int i, len, paths, data_offset;
+ struct pr_keys *keys;
+ sense_reason_t ret;
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ops->pr_read_keys) {
+ pr_err("Block device does not support read_keys.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /*
+ * We don't know what's under us, but dm-multipath will register every
+ * path with the same key, so start off with enough space for 16 paths,
+ * which is not a lot of memory and should normally be enough.
+ */
+ paths = 16;
+retry:
+ len = 8 * paths;
+ keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
+ if (!keys)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ keys->num_keys = paths;
+ if (!ops->pr_read_keys(bdev, keys)) {
+ if (keys->num_keys > paths) {
+ kfree(keys);
+ paths *= 2;
+ goto retry;
+ }
+ } else {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto free_keys;
+ }
+
+ ret = TCM_NO_SENSE;
+
+ put_unaligned_be32(keys->generation, &param_data[0]);
+ if (!keys->num_keys) {
+ put_unaligned_be32(0, &param_data[4]);
+ goto free_keys;
+ }
+
+ put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
+
+ data_offset = 8;
+ for (i = 0; i < keys->num_keys; i++) {
+ if (data_offset + 8 > cmd->data_length)
+ break;
+
+ put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
+ data_offset += 8;
+ }
+
+free_keys:
+ kfree(keys);
+ return ret;
+}
+
+static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
+ unsigned char *param_data)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_held_reservation rsv = { };
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ops->pr_read_reservation) {
+ pr_err("Block device does not support read_keys.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (ops->pr_read_reservation(bdev, &rsv))
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be32(rsv.generation, &param_data[0]);
+ if (!block_pr_type_to_scsi(rsv.type)) {
+ put_unaligned_be32(0, &param_data[4]);
+ return TCM_NO_SENSE;
+ }
+
+ put_unaligned_be32(16, &param_data[4]);
+
+ if (cmd->data_length < 16)
+ return TCM_NO_SENSE;
+ put_unaligned_be64(rsv.key, &param_data[8]);
+
+ if (cmd->data_length < 22)
+ return TCM_NO_SENSE;
+ param_data[21] = block_pr_type_to_scsi(rsv.type);
+
+ return TCM_NO_SENSE;
+}
+
+static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
+ unsigned char *param_data)
+{
+ sense_reason_t ret = TCM_NO_SENSE;
+
+ switch (sa) {
+ case PRI_REPORT_CAPABILITIES:
+ iblock_pr_report_caps(param_data);
+ break;
+ case PRI_READ_KEYS:
+ ret = iblock_pr_read_keys(cmd, param_data);
+ break;
+ case PRI_READ_RESERVATION:
+ ret = iblock_pr_read_reservation(cmd, param_data);
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ret;
+}
+
static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -869,17 +1127,19 @@ static unsigned int iblock_get_io_opt(struct se_device *dev)
return bdev_io_opt(bd);
}
-static struct sbc_ops iblock_sbc_ops = {
+static struct exec_cmd_ops iblock_exec_cmd_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
.execute_unmap = iblock_execute_unmap,
+ .execute_pr_out = iblock_execute_pr_out,
+ .execute_pr_in = iblock_execute_pr_in,
};
static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+ return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
}
static bool iblock_get_write_cache(struct se_device *dev)
@@ -890,6 +1150,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
static const struct target_backend_ops iblock_ops = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
.inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
.attach_hba = iblock_attach_hba,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d19ec4e6a4c0..49d9167bb263 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3538,6 +3538,37 @@ out_put_pr_reg:
return ret;
}
+static sense_reason_t
+target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key,
+ u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+
+ if (!cmd->se_sess || !cmd->se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (!ops->execute_pr_out) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ switch (sa) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (spec_i_pt || all_tg_pt) {
+ pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl);
+}
+
/*
* See spc4r17 section 6.14 Table 170
*/
@@ -3641,6 +3672,12 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type,
+ aptpl, all_tg_pt, spec_i_pt);
+ goto done;
+ }
+
/*
* (core_scsi3_emulate_pro_* function parameters
* are defined by spc4r17 Table 174:
@@ -3682,6 +3719,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+done:
if (!ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
@@ -4039,9 +4077,42 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
+static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, u8 sa)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ unsigned char *buf;
+ sense_reason_t ret;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA SCSI Data Length: %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (!ops->execute_pr_in) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (sa == PRI_READ_FULL_STATUS) {
+ pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ ret = ops->execute_pr_in(cmd, sa, buf);
+
+ transport_kunmap_data_sg(cmd);
+ return ret;
+}
+
sense_reason_t
target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
+ u8 sa = cmd->t_task_cdb[1] & 0x1f;
sense_reason_t ret;
/*
@@ -4060,7 +4131,12 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return TCM_RESERVATION_CONFLICT;
}
- switch (cmd->t_task_cdb[1] & 0x1f) {
+ if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_in_pt(cmd, sa);
+ goto done;
+ }
+
+ switch (sa) {
case PRI_READ_KEYS:
ret = core_scsi3_pri_read_keys(cmd);
break;
@@ -4079,6 +4155,7 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+done:
if (!ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 6648c1c90e19..6f67cc09c2b5 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -643,14 +643,14 @@ static void rd_free_prot(struct se_device *dev)
rd_release_prot_space(rd_dev);
}
-static struct sbc_ops rd_sbc_ops = {
+static struct exec_cmd_ops rd_exec_cmd_ops = {
.execute_rw = rd_execute_rw,
};
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &rd_sbc_ops);
+ return sbc_parse_cdb(cmd, &rd_exec_cmd_ops);
}
static const struct target_backend_ops rd_mcp_ops = {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 7536ca797606..6a02561cc20c 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
sector_t nolb = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
@@ -271,7 +271,8 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb)
}
static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags,
+ struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
sector_t end_lba = dev->transport->get_blocks(dev) + 1;
@@ -340,7 +341,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
cmd->data_direction);
@@ -566,7 +567,7 @@ out:
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
sense_reason_t ret;
int rc;
@@ -764,7 +765,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
}
sense_reason_t
-sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
@@ -1076,7 +1077,7 @@ EXPORT_SYMBOL(sbc_get_device_type);
static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
sector_t lba;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 89c0d56294cc..50290abc07bc 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1424,9 +1424,10 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static bool tcm_is_ws_enabled(struct se_cmd *cmd)
+static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
@@ -1451,7 +1452,8 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
-static bool tcm_is_caw_enabled(struct se_cmd *cmd)
+static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1491,7 +1493,8 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
+static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1502,7 +1505,6 @@ static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return true;
-
}
static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
@@ -1537,9 +1539,10 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
-static bool tcm_is_unmap_enabled(struct se_cmd *cmd)
+static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
@@ -1659,11 +1662,46 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_pr_enabled(struct se_cmd *cmd)
+static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- return dev->dev_attrib.emulate_pr;
+ if (!dev->dev_attrib.emulate_pr)
+ return false;
+
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ return true;
+
+ switch (descr->opcode) {
+ case RESERVE:
+ case RESERVE_10:
+ case RELEASE:
+ case RELEASE_10:
+ /*
+ * The pr_ops which are used by the backend modules don't
+ * support these commands.
+ */
+ return false;
+ case PERSISTENT_RESERVE_OUT:
+ switch (descr->service_action) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ /*
+ * The backend modules don't have access to ports and
+ * I_T nexuses so they can't handle these type of
+ * requests.
+ */
+ return false;
+ }
+ break;
+ case PERSISTENT_RESERVE_IN:
+ if (descr->service_action == PRI_READ_FULL_STATUS)
+ return false;
+ break;
+ }
+
+ return true;
}
static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
@@ -1788,20 +1826,13 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.enabled = tcm_is_pr_enabled,
};
-static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
-
- return dev->dev_attrib.emulate_pr;
-}
-
static struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE,
.cdb_size = 6,
.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_release10 = {
@@ -1811,7 +1842,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_reserve = {
@@ -1820,7 +1851,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = {
.cdb_size = 6,
.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_reserve10 = {
@@ -1830,7 +1861,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = {
.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_request_sense = {
@@ -1849,7 +1880,8 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_3pc_enabled(struct se_cmd *cmd)
+static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1910,8 +1942,8 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-
-static bool spc_rsoc_enabled(struct se_cmd *cmd)
+static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1931,7 +1963,8 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.enabled = spc_rsoc_enabled,
};
-static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd)
+static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct t10_alua_tg_pt_gp *l_tg_pt_gp;
struct se_lun *l_lun = cmd->se_lun;
@@ -2118,7 +2151,7 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
- if (!descr->enabled || descr->enabled(cmd))
+ if (!descr->enabled || descr->enabled(descr, cmd))
*opcode = descr;
break;
case 0x2:
@@ -2132,7 +2165,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
*/
if (descr->serv_action_valid &&
descr->service_action == requested_sa) {
- if (!descr->enabled || descr->enabled(cmd))
+ if (!descr->enabled || descr->enabled(descr,
+ cmd))
*opcode = descr;
} else if (!descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
@@ -2145,7 +2179,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
* be returned in the one_command parameter data format.
*/
if (descr->service_action == requested_sa)
- if (!descr->enabled || descr->enabled(cmd))
+ if (!descr->enabled || descr->enabled(descr,
+ cmd))
*opcode = descr;
break;
}
@@ -2202,7 +2237,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
descr = tcm_supported_opcodes[i];
- if (descr->enabled && !descr->enabled(cmd))
+ if (descr->enabled && !descr->enabled(descr, cmd))
continue;
response_length += spc_rsoc_encode_command_descriptor(
@@ -2231,12 +2266,22 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
- if (!dev->dev_attrib.emulate_pr &&
- ((cdb[0] == PERSISTENT_RESERVE_IN) ||
- (cdb[0] == PERSISTENT_RESERVE_OUT) ||
- (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
- (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
- return TCM_UNSUPPORTED_SCSI_OPCODE;
+ switch (cdb[0]) {
+ case RESERVE:
+ case RESERVE_10:
+ case RELEASE:
+ case RELEASE_10:
+ if (!dev->dev_attrib.emulate_pr)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ if (!dev->dev_attrib.emulate_pr)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
}
switch (cdb[0]) {