Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig                        |   11
-rw-r--r--  drivers/scsi/Makefile                       |    1
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c          |  148
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c               |    2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c  |    2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c   |    2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c |    4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c  |    2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                    |    1
-rw-r--r--  drivers/scsi/ips.c                          |    2
-rw-r--r--  drivers/scsi/osd/osd_initiator.c            |    4
-rw-r--r--  drivers/scsi/osst.c                         |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c               |    1
-rw-r--r--  drivers/scsi/scsi_debug.c                   |    2
-rw-r--r--  drivers/scsi/scsi_error.c                   |    2
-rw-r--r--  drivers/scsi/scsi_lib.c                     |   24
-rw-r--r--  drivers/scsi/sd.c                           |   77
-rw-r--r--  drivers/scsi/sd.h                           |   66
-rw-r--r--  drivers/scsi/sd_dif.c                       |  353
-rw-r--r--  drivers/scsi/sg.c                           |    4
-rw-r--r--  drivers/scsi/st.c                           |    2
-rw-r--r--  drivers/scsi/virtio_scsi.c                  |   42
-rw-r--r--  drivers/scsi/vmw_pvscsi.h                   |    1
-rw-r--r--  drivers/scsi/xen-scsifront.c                | 1026
24 files changed, 1265 insertions(+), 516 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e85e64a07d02..3a820f61ce65 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -73,7 +73,6 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
- select CRC_T10DIF if BLK_DEV_INTEGRITY
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
@@ -587,6 +586,16 @@ config VMWARE_PVSCSI
To compile this driver as a module, choose M here: the
module will be called vmw_pvscsi.
+config XEN_SCSI_FRONTEND
+ tristate "XEN SCSI frontend driver"
+ depends on SCSI && XEN
+ select XEN_XENBUS_FRONTEND
+ help
+	  The XEN SCSI frontend driver allows the kernel to access SCSI devices
+ within another guest OS (usually Dom0).
+ Only needed if the kernel is running in a XEN guest and generic
+ SCSI access to a device is needed.
+
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
depends on SCSI && HYPERV
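
For reference (an illustrative fragment, not part of the patch): a Xen guest kernel that wants the new frontend built as a module would carry something like the following in its .config, with XEN_XENBUS_FRONTEND pulled in by the select above:

	CONFIG_SCSI=y
	CONFIG_XEN=y
	CONFIG_XEN_SCSI_FRONTEND=m
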
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 5f0d299b0093..59f1ce6df2d6 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
+obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 02e69e7ee4a3..3e0a0d315f72 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -259,6 +259,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
+#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
@@ -344,6 +345,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
+#endif
static void send_close_req(struct cxgbi_sock *csk)
{
@@ -756,7 +758,7 @@ static int act_open_rpl_status_to_errno(int status)
static void csk_act_open_retry_timer(unsigned long data)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
@@ -781,9 +783,11 @@ static void csk_act_open_retry_timer(unsigned long data)
if (csk->csk_family == AF_INET) {
send_act_open_func = send_act_open_req;
skb = alloc_wr(size, 0, GFP_ATOMIC);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
send_act_open_func = send_act_open_req6;
skb = alloc_wr(size6, 0, GFP_ATOMIC);
+#endif
}
if (!skb)
@@ -1313,11 +1317,6 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
- if (!n) {
- pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
- goto rel_resource;
- }
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
@@ -1335,8 +1334,10 @@ static int init_act_open(struct cxgbi_sock *csk)
if (csk->csk_family == AF_INET)
skb = alloc_wr(size, 0, GFP_NOIO);
+#if IS_ENABLED(CONFIG_IPV6)
else
skb = alloc_wr(size6, 0, GFP_NOIO);
+#endif
if (!skb)
goto rel_resource;
@@ -1370,8 +1371,10 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
if (csk->csk_family == AF_INET)
send_act_open_req(csk, skb, csk->l2t);
+#if IS_ENABLED(CONFIG_IPV6)
else
send_act_open_req6(csk, skb, csk->l2t);
+#endif
neigh_release(n);
return 0;
@@ -1635,129 +1638,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
return 0;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static int cxgbi_inet6addr_handler(struct notifier_block *this,
- unsigned long event, void *data)
-{
- struct inet6_ifaddr *ifa = data;
- struct net_device *event_dev = ifa->idev->dev;
- struct cxgbi_device *cdev;
- int ret = NOTIFY_DONE;
-
- if (event_dev->priv_flags & IFF_802_1Q_VLAN)
- event_dev = vlan_dev_real_dev(event_dev);
-
- cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
-
- if (!cdev)
- return ret;
-
- switch (event) {
- case NETDEV_UP:
- ret = cxgb4_clip_get(event_dev,
- (const struct in6_addr *)
- ((ifa)->addr.s6_addr));
- if (ret < 0)
- return ret;
-
- ret = NOTIFY_OK;
- break;
-
- case NETDEV_DOWN:
- cxgb4_clip_release(event_dev,
- (const struct in6_addr *)
- ((ifa)->addr.s6_addr));
- ret = NOTIFY_OK;
- break;
-
- default:
- break;
- }
-
- return ret;
-}
-
-static struct notifier_block cxgbi_inet6addr_notifier = {
- .notifier_call = cxgbi_inet6addr_handler
-};
-
-/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
- * a physical device.
- * The physical device reference is needed to send the actual CLIP command.
- */
-static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
-{
- struct inet6_dev *idev = NULL;
- struct inet6_ifaddr *ifa;
- int ret = 0;
-
- idev = __in6_dev_get(root_dev);
- if (!idev)
- return ret;
-
- read_lock_bh(&idev->lock);
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
- pr_info("updating the clip for addr %pI6\n",
- ifa->addr.s6_addr);
- ret = cxgb4_clip_get(dev, (const struct in6_addr *)
- ifa->addr.s6_addr);
- if (ret < 0)
- break;
- }
-
- read_unlock_bh(&idev->lock);
- return ret;
-}
-
-static int update_root_dev_clip(struct net_device *dev)
-{
- struct net_device *root_dev = NULL;
- int i, ret = 0;
-
- /* First populate the real net device's IPv6 address */
- ret = update_dev_clip(dev, dev);
- if (ret)
- return ret;
-
- /* Parse all bond and vlan devices layered on top of the physical dev */
- root_dev = netdev_master_upper_dev_get(dev);
- if (root_dev) {
- ret = update_dev_clip(root_dev, dev);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < VLAN_N_VID; i++) {
- root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
- if (!root_dev)
- continue;
-
- ret = update_dev_clip(root_dev, dev);
- if (ret)
- break;
- }
- return ret;
-}
-
-static void cxgbi_update_clip(struct cxgbi_device *cdev)
-{
- int i;
-
- rcu_read_lock();
-
- for (i = 0; i < cdev->nports; i++) {
- struct net_device *dev = cdev->ports[i];
- int ret = 0;
-
- if (dev)
- ret = update_root_dev_clip(dev);
- if (ret < 0)
- break;
- }
- rcu_read_unlock();
-}
-#endif /* IS_ENABLED(CONFIG_IPV6) */
-
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
struct cxgbi_device *cdev;
@@ -1876,10 +1756,6 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
switch (state) {
case CXGB4_STATE_UP:
pr_info("cdev 0x%p, UP.\n", cdev);
-#if IS_ENABLED(CONFIG_IPV6)
- cxgbi_update_clip(cdev);
-#endif
- /* re-initialize */
break;
case CXGB4_STATE_START_RECOVERY:
pr_info("cdev 0x%p, RECOVERY.\n", cdev);
@@ -1910,17 +1786,11 @@ static int __init cxgb4i_init_module(void)
return rc;
cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
-#if IS_ENABLED(CONFIG_IPV6)
- register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
-#endif
return 0;
}
static void __exit cxgb4i_exit_module(void)
{
-#if IS_ENABLED(CONFIG_IPV6)
- unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
-#endif
cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
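
Note on the cxgb4i changes above: the IPv6-only paths are now compiled out with IS_ENABLED(CONFIG_IPV6), which evaluates true when IPv6 is built in or modular, and the driver-local CLIP/notifier code is dropped. A minimal sketch of the guard pattern, with hypothetical function names:

	#if IS_ENABLED(CONFIG_IPV6)
	static void send_req6(struct sk_buff *skb);	/* hypothetical */
	#endif
	static void send_req4(struct sk_buff *skb);	/* hypothetical */

	static void send_req(struct sk_buff *skb, sa_family_t family)
	{
		if (family == AF_INET)
			send_req4(skb);
	#if IS_ENABLED(CONFIG_IPV6)
		else
			send_req6(skb);
	#endif
	}
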
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 6a2001d6b442..54fa6e0bc1bb 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -275,6 +275,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
+#if IS_ENABLED(CONFIG_IPV6)
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
int *port)
{
@@ -307,6 +308,7 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
ndev, ndev->name);
return NULL;
}
+#endif
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bcf67eec921..e99507ed0e3c 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -115,7 +115,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
rq = blk_get_request(q, rw, GFP_NOIO);
- if (!rq) {
+ if (IS_ERR(rq)) {
sdev_printk(KERN_INFO, sdev,
"%s: blk_get_request failed\n", __func__);
return NULL;
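
The scsi_dh_* and related conversions in this series track a block layer change: blk_get_request() now returns an ERR_PTR()-encoded errno instead of NULL on failure, so a NULL check can no longer catch the error. A minimal sketch of the updated calling convention (queue and flags are placeholders):

	struct request *rq;

	rq = blk_get_request(q, rw, GFP_NOIO);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* propagate the encoded errno */
	blk_rq_set_block_pc(rq);

The sg.c hunk further below propagates PTR_ERR(rq) this way rather than hard-coding -ENOMEM.
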
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6f07f7fe3aa1..84765384c47c 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -275,7 +275,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
rq = blk_get_request(sdev->request_queue,
(cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
- if (!rq) {
+ if (IS_ERR(rq)) {
sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
return NULL;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e9d9fea9e272..4ee2759f5299 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -117,7 +117,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
- if (!req)
+ if (IS_ERR(req))
return SCSI_DH_RES_TEMP_UNAVAIL;
blk_rq_set_block_pc(req);
@@ -247,7 +247,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct request *req;
req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
- if (!req)
+ if (IS_ERR(req))
return SCSI_DH_RES_TEMP_UNAVAIL;
blk_rq_set_block_pc(req);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 826069db9848..1b5bc9293e37 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -274,7 +274,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
rq = blk_get_request(q, rw, GFP_NOIO);
- if (!rq) {
+ if (IS_ERR(rq)) {
sdev_printk(KERN_INFO, sdev,
"get_rdac_req: blk_get_request failed.\n");
return NULL;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 00ee0ed642aa..4a8ac7d8c76b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1884,7 +1884,6 @@ retry:
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->fcoe_rx_list.lock);
schedule();
- set_current_state(TASK_RUNNING);
goto retry;
}
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 52a216f21ae5..e5afc3884d74 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -528,7 +528,7 @@ ips_setup(char *ips_str)
* Update the variables
*/
for (i = 0; i < ARRAY_SIZE(options); i++) {
- if (strnicmp
+ if (strncasecmp
(key, options[i].option_name,
strlen(options[i].option_name)) == 0) {
if (value)
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 5f4cbf0c4759..fd19fd8468ac 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1567,8 +1567,8 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
struct request *req;
req = blk_get_request(q, has_write ? WRITE : READ, flags);
- if (unlikely(!req))
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(req))
+ return req;
blk_rq_set_block_pc(req);
return req;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 0727ea7cc387..dff37a250d79 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -362,7 +362,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
int write = (data_direction == DMA_TO_DEVICE);
req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
- if (!req)
+ if (IS_ERR(req))
return DRIVER_ERROR << 24;
blk_rq_set_block_pc(req);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dabd25429c58..db3dbd999cb6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4875,7 +4875,6 @@ qla2x00_do_dpc(void *data)
"DPC handler sleeping.\n");
schedule();
- __set_current_state(TASK_RUNNING);
if (!base_vha->flags.init_done || ha->flags.mbox_busy)
goto end_loop;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2b6d447ad6d6..238e06f13b8a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3371,7 +3371,7 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf,
char work[20];
if (1 == sscanf(buf, "%10s", work)) {
- if (0 == strnicmp(work,"0x", 2)) {
+ if (0 == strncasecmp(work,"0x", 2)) {
if (1 == sscanf(&work[2], "%x", &opts))
goto opts_done;
} else {
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6b20ef3fee54..9a6f8468225f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1961,6 +1961,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
* request becomes available
*/
req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ if (IS_ERR(req))
+ return;
blk_rq_set_block_pc(req);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index db8c449282f9..9eff8a375132 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -221,7 +221,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int ret = DRIVER_ERROR << 24;
req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
- if (!req)
+ if (IS_ERR(req))
return ret;
blk_rq_set_block_pc(req);
@@ -715,7 +715,7 @@ static bool scsi_end_request(struct request *req, int error,
if (req->mq_ctx) {
/*
- * In the MQ case the command gets freed by __blk_mq_end_io,
+ * In the MQ case the command gets freed by __blk_mq_end_request,
* so we have to do all cleanup that depends on it earlier.
*
* We also can't kick the queues from irq context, so we
@@ -723,7 +723,7 @@ static bool scsi_end_request(struct request *req, int error,
*/
scsi_mq_uninit_cmd(cmd);
- __blk_mq_end_io(req, error);
+ __blk_mq_end_request(req, error);
if (scsi_target(sdev)->single_lun ||
!list_empty(&sdev->host->starved_list))
@@ -1847,6 +1847,8 @@ static int scsi_mq_prep_fn(struct request *req)
next_rq->special = bidi_sdb;
}
+ blk_mq_start_request(req);
+
return scsi_setup_cmnd(sdev, req);
}
@@ -1856,7 +1858,8 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
+ bool last)
{
struct request_queue *q = req->q;
struct scsi_device *sdev = q->queuedata;
@@ -1880,11 +1883,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
+
if (!(req->cmd_flags & REQ_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
if (ret)
goto out_dec_host_busy;
req->cmd_flags |= REQ_DONTPREP;
+ } else {
+ blk_mq_start_request(req);
}
scsi_init_cmd_errh(cmd);
@@ -1931,6 +1937,14 @@ out:
return ret;
}
+static enum blk_eh_timer_return scsi_timeout(struct request *req,
+ bool reserved)
+{
+ if (reserved)
+ return BLK_EH_RESET_TIMER;
+ return scsi_times_out(req);
+}
+
static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
@@ -2042,7 +2056,7 @@ static struct blk_mq_ops scsi_mq_ops = {
.map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
- .timeout = scsi_times_out,
+ .timeout = scsi_timeout,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
};
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0cb5c9f0c743..cfba74cd8e8b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -610,29 +610,44 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
-static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
-{
- unsigned int prot_op = SCSI_PROT_NORMAL;
- unsigned int dix = scsi_prot_sg_count(scmd);
-
- if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
- if (dif && dix)
- prot_op = SCSI_PROT_READ_PASS;
- else if (dif && !dix)
- prot_op = SCSI_PROT_READ_STRIP;
- else if (!dif && dix)
- prot_op = SCSI_PROT_READ_INSERT;
- } else {
- if (dif && dix)
- prot_op = SCSI_PROT_WRITE_PASS;
- else if (dif && !dix)
- prot_op = SCSI_PROT_WRITE_INSERT;
- else if (!dif && dix)
- prot_op = SCSI_PROT_WRITE_STRIP;
+
+
+static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
+ unsigned int dix, unsigned int dif)
+{
+ struct bio *bio = scmd->request->bio;
+ unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
+ unsigned int protect = 0;
+
+ if (dix) { /* DIX Type 0, 1, 2, 3 */
+ if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
+ scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
+
+ if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
+ }
+
+ if (dif != SD_DIF_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
+ scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
+
+ if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ scmd->prot_flags |= SCSI_PROT_REF_CHECK;
+ }
+
+ if (dif) { /* DIX/DIF Type 1, 2, 3 */
+ scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
+
+ if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
+ protect = 3 << 5; /* Disable target PI checking */
+ else
+ protect = 1 << 5; /* Enable target PI checking */
}
scsi_set_prot_op(scmd, prot_op);
scsi_set_prot_type(scmd, dif);
+ scmd->prot_flags &= sd_prot_flag_mask(prot_op);
+
+ return protect;
}
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
@@ -893,7 +908,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
sector_t block = blk_rq_pos(rq);
sector_t threshold;
unsigned int this_count = blk_rq_sectors(rq);
- int ret, host_dif;
+ unsigned int dif, dix;
+ int ret;
unsigned char protect;
ret = scsi_init_io(SCpnt, GFP_ATOMIC);
@@ -995,7 +1011,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
SCpnt->cmnd[0] = WRITE_6;
if (blk_integrity_rq(rq))
- sd_dif_prepare(rq, block, sdp->sector_size);
+ sd_dif_prepare(SCpnt);
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
@@ -1010,14 +1026,15 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
"writing" : "reading", this_count,
blk_rq_sectors(rq)));
- /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
- host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
- if (host_dif)
- protect = 1 << 5;
+ dix = scsi_prot_sg_count(SCpnt);
+ dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type);
+
+ if (dif || dix)
+ protect = sd_setup_protect_cmnd(SCpnt, dix, dif);
else
protect = 0;
- if (host_dif == SD_DIF_TYPE2_PROTECTION) {
+ if (protect && sdkp->protection_type == SD_DIF_TYPE2_PROTECTION) {
SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
if (unlikely(SCpnt->cmnd == NULL)) {
@@ -1102,10 +1119,6 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
}
SCpnt->sdb.length = this_count * sdp->sector_size;
- /* If DIF or DIX is enabled, tell HBA how to handle request */
- if (host_dif || scsi_prot_sg_count(SCpnt))
- sd_prot_op(SCpnt, host_dif);
-
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
* host adapter, it's safe to assume that we can at least transfer
@@ -2664,8 +2677,10 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
rot = get_unaligned_be16(&buffer[4]);
- if (rot == 1)
+ if (rot == 1) {
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+ }
out:
kfree(buffer);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 4c3ab8377fd3..467377884b63 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -167,6 +167,68 @@ enum sd_dif_target_protection_types {
};
/*
+ * Look up the DIX operation based on whether the command is read or
+ * write and whether dix and dif are enabled.
+ */
+static inline unsigned int sd_prot_op(bool write, bool dix, bool dif)
+{
+ /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
+ const unsigned int ops[] = { /* wrt dix dif */
+ SCSI_PROT_NORMAL, /* 0 0 0 */
+ SCSI_PROT_READ_STRIP, /* 0 0 1 */
+ SCSI_PROT_READ_INSERT, /* 0 1 0 */
+ SCSI_PROT_READ_PASS, /* 0 1 1 */
+ SCSI_PROT_NORMAL, /* 1 0 0 */
+ SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
+ SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
+ SCSI_PROT_WRITE_PASS, /* 1 1 1 */
+ };
+
+ return ops[write << 2 | dix << 1 | dif];
+}
+
+/*
+ * Returns a mask of the protection flags that are valid for a given DIX
+ * operation.
+ */
+static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
+{
+ const unsigned int flag_mask[] = {
+ [SCSI_PROT_NORMAL] = 0,
+
+ [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT,
+
+ [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_REF_INCREMENT,
+
+ [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+ };
+
+ return flag_mask[prot_op];
+}
+
+/*
* Data Integrity Field tuple.
*/
struct sd_dif_tuple {
@@ -178,7 +240,7 @@ struct sd_dif_tuple {
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
-extern void sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_prepare(struct scsi_cmnd *scmd);
extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
@@ -186,7 +248,7 @@ extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
static inline void sd_dif_config_host(struct scsi_disk *disk)
{
}
-static inline int sd_dif_prepare(struct request *rq, sector_t s, unsigned int a)
+static inline int sd_dif_prepare(struct scsi_cmnd *scmd)
{
return 0;
}
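
Since sd_prot_op() indexes its table with write << 2 | dix << 1 | dif, a write with both DIX and DIF enabled lands on index 0b111. A small illustrative check (not from the patch):

	unsigned int op = sd_prot_op(true, true, true);
	/* op == SCSI_PROT_WRITE_PASS */
	unsigned int mask = sd_prot_flag_mask(op);
	/* mask permits TRANSFER_PI, GUARD_CHECK, REF_CHECK,
	 * REF_INCREMENT and IP_CHECKSUM for this operation */
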
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index a7a691d0af7d..14c7d42a11c2 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -21,7 +21,7 @@
*/
#include <linux/blkdev.h>
-#include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -33,268 +33,8 @@
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>
-#include <net/checksum.h>
-
#include "sd.h"
-typedef __u16 (csum_fn) (void *, unsigned int);
-
-static __u16 sd_dif_crc_fn(void *data, unsigned int len)
-{
- return cpu_to_be16(crc_t10dif(data, len));
-}
-
-static __u16 sd_dif_ip_fn(void *data, unsigned int len)
-{
- return ip_compute_csum(data, len);
-}
-
-/*
- * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
- * 16 bit app tag, 32 bit reference tag.
- */
-static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
-{
- void *buf = bix->data_buf;
- struct sd_dif_tuple *sdt = bix->prot_buf;
- sector_t sector = bix->sector;
- unsigned int i;
-
- for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
- sdt->guard_tag = fn(buf, bix->sector_size);
- sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
- sdt->app_tag = 0;
-
- buf += bix->sector_size;
- sector++;
- }
-}
-
-static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
-{
- sd_dif_type1_generate(bix, sd_dif_crc_fn);
-}
-
-static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
-{
- sd_dif_type1_generate(bix, sd_dif_ip_fn);
-}
-
-static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
-{
- void *buf = bix->data_buf;
- struct sd_dif_tuple *sdt = bix->prot_buf;
- sector_t sector = bix->sector;
- unsigned int i;
- __u16 csum;
-
- for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
- /* Unwritten sectors */
- if (sdt->app_tag == 0xffff)
- return 0;
-
- if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
- printk(KERN_ERR
- "%s: ref tag error on sector %lu (rcvd %u)\n",
- bix->disk_name, (unsigned long)sector,
- be32_to_cpu(sdt->ref_tag));
- return -EIO;
- }
-
- csum = fn(buf, bix->sector_size);
-
- if (sdt->guard_tag != csum) {
- printk(KERN_ERR "%s: guard tag error on sector %lu " \
- "(rcvd %04x, data %04x)\n", bix->disk_name,
- (unsigned long)sector,
- be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
- return -EIO;
- }
-
- buf += bix->sector_size;
- sector++;
- }
-
- return 0;
-}
-
-static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
-{
- return sd_dif_type1_verify(bix, sd_dif_crc_fn);
-}
-
-static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
-{
- return sd_dif_type1_verify(bix, sd_dif_ip_fn);
-}
-
-/*
- * Functions for interleaving and deinterleaving application tags
- */
-static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
-{
- struct sd_dif_tuple *sdt = prot;
- u8 *tag = tag_buf;
- unsigned int i, j;
-
- for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
- sdt->app_tag = tag[j] << 8 | tag[j+1];
- BUG_ON(sdt->app_tag == 0xffff);
- }
-}
-
-static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
-{
- struct sd_dif_tuple *sdt = prot;
- u8 *tag = tag_buf;
- unsigned int i, j;
-
- for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
- tag[j] = (sdt->app_tag & 0xff00) >> 8;
- tag[j+1] = sdt->app_tag & 0xff;
- }
-}
-
-static struct blk_integrity dif_type1_integrity_crc = {
- .name = "T10-DIF-TYPE1-CRC",
- .generate_fn = sd_dif_type1_generate_crc,
- .verify_fn = sd_dif_type1_verify_crc,
- .get_tag_fn = sd_dif_type1_get_tag,
- .set_tag_fn = sd_dif_type1_set_tag,
- .tuple_size = sizeof(struct sd_dif_tuple),
- .tag_size = 0,
-};
-
-static struct blk_integrity dif_type1_integrity_ip = {
- .name = "T10-DIF-TYPE1-IP",
- .generate_fn = sd_dif_type1_generate_ip,
- .verify_fn = sd_dif_type1_verify_ip,
- .get_tag_fn = sd_dif_type1_get_tag,
- .set_tag_fn = sd_dif_type1_set_tag,
- .tuple_size = sizeof(struct sd_dif_tuple),
- .tag_size = 0,
-};
-
-
-/*
- * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
- * tag space.
- */
-static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
-{
- void *buf = bix->data_buf;
- struct sd_dif_tuple *sdt = bix->prot_buf;
- unsigned int i;
-
- for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
- sdt->guard_tag = fn(buf, bix->sector_size);
- sdt->ref_tag = 0;
- sdt->app_tag = 0;
-
- buf += bix->sector_size;
- }
-}
-
-static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
-{
- sd_dif_type3_generate(bix, sd_dif_crc_fn);
-}
-
-static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
-{
- sd_dif_type3_generate(bix, sd_dif_ip_fn);
-}
-
-static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
-{
- void *buf = bix->data_buf;
- struct sd_dif_tuple *sdt = bix->prot_buf;
- sector_t sector = bix->sector;
- unsigned int i;
- __u16 csum;
-
- for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
- /* Unwritten sectors */
- if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
- return 0;
-
- csum = fn(buf, bix->sector_size);
-
- if (sdt->guard_tag != csum) {
- printk(KERN_ERR "%s: guard tag error on sector %lu " \
- "(rcvd %04x, data %04x)\n", bix->disk_name,
- (unsigned long)sector,
- be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
- return -EIO;
- }
-
- buf += bix->sector_size;
- sector++;
- }
-
- return 0;
-}
-
-static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
-{
- return sd_dif_type3_verify(bix, sd_dif_crc_fn);
-}
-
-static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
-{
- return sd_dif_type3_verify(bix, sd_dif_ip_fn);
-}
-
-static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
-{
- struct sd_dif_tuple *sdt = prot;
- u8 *tag = tag_buf;
- unsigned int i, j;
-
- for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
- sdt->app_tag = tag[j] << 8 | tag[j+1];
- sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
- tag[j+4] << 8 | tag[j+5];
- }
-}
-
-static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
-{
- struct sd_dif_tuple *sdt = prot;
- u8 *tag = tag_buf;
- unsigned int i, j;
-
- for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
- tag[j] = (sdt->app_tag & 0xff00) >> 8;
- tag[j+1] = sdt->app_tag & 0xff;
- tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
- tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
- tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
- tag[j+5] = sdt->ref_tag & 0xff;
- BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
- }
-}
-
-static struct blk_integrity dif_type3_integrity_crc = {
- .name = "T10-DIF-TYPE3-CRC",
- .generate_fn = sd_dif_type3_generate_crc,
- .verify_fn = sd_dif_type3_verify_crc,
- .get_tag_fn = sd_dif_type3_get_tag,
- .set_tag_fn = sd_dif_type3_set_tag,
- .tuple_size = sizeof(struct sd_dif_tuple),
- .tag_size = 0,
-};
-
-static struct blk_integrity dif_type3_integrity_ip = {
- .name = "T10-DIF-TYPE3-IP",
- .generate_fn = sd_dif_type3_generate_ip,
- .verify_fn = sd_dif_type3_verify_ip,
- .get_tag_fn = sd_dif_type3_get_tag,
- .set_tag_fn = sd_dif_type3_set_tag,
- .tuple_size = sizeof(struct sd_dif_tuple),
- .tag_size = 0,
-};
-
/*
* Configure exchange of protection information between OS and HBA.
*/
@@ -316,22 +56,30 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
return;
/* Enable DMA of protection information */
- if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
+ if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
if (type == SD_DIF_TYPE3_PROTECTION)
- blk_integrity_register(disk, &dif_type3_integrity_ip);
+ blk_integrity_register(disk, &t10_pi_type3_ip);
else
- blk_integrity_register(disk, &dif_type1_integrity_ip);
- else
+ blk_integrity_register(disk, &t10_pi_type1_ip);
+
+ disk->integrity->flags |= BLK_INTEGRITY_IP_CHECKSUM;
+ } else
if (type == SD_DIF_TYPE3_PROTECTION)
- blk_integrity_register(disk, &dif_type3_integrity_crc);
+ blk_integrity_register(disk, &t10_pi_type3_crc);
else
- blk_integrity_register(disk, &dif_type1_integrity_crc);
+ blk_integrity_register(disk, &t10_pi_type1_crc);
sd_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s protection\n", disk->integrity->name);
/* Signal to block layer that we support sector tagging */
- if (dif && type && sdkp->ATO) {
+ if (dif && type) {
+
+ disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+
+	if (!sdkp->ATO)
+ return;
+
if (type == SD_DIF_TYPE3_PROTECTION)
disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
else
@@ -358,50 +106,49 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
*
* Type 3 does not have a reference tag so no remapping is required.
*/
-void sd_dif_prepare(struct request *rq, sector_t hw_sector,
- unsigned int sector_sz)
+void sd_dif_prepare(struct scsi_cmnd *scmd)
{
- const int tuple_sz = sizeof(struct sd_dif_tuple);
+ const int tuple_sz = sizeof(struct t10_pi_tuple);
struct bio *bio;
struct scsi_disk *sdkp;
- struct sd_dif_tuple *sdt;
+ struct t10_pi_tuple *pi;
u32 phys, virt;
- sdkp = rq->bio->bi_bdev->bd_disk->private_data;
+ sdkp = scsi_disk(scmd->request->rq_disk);
if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
return;
- phys = hw_sector & 0xffffffff;
+ phys = scsi_prot_ref_tag(scmd);
- __rq_for_each_bio(bio, rq) {
+ __rq_for_each_bio(bio, scmd->request) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_vec iv;
struct bvec_iter iter;
unsigned int j;
/* Already remapped? */
- if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
+ if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
break;
- virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
+ virt = bip_get_seed(bip) & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, iter) {
- sdt = kmap_atomic(iv.bv_page)
- + iv.bv_offset;
+ bip_for_each_vec(iv, bip, iter) {
+ pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
- for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
- if (be32_to_cpu(sdt->ref_tag) == virt)
- sdt->ref_tag = cpu_to_be32(phys);
+ if (be32_to_cpu(pi->ref_tag) == virt)
+ pi->ref_tag = cpu_to_be32(phys);
virt++;
phys++;
}
- kunmap_atomic(sdt);
+ kunmap_atomic(pi);
}
- bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
+ bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
@@ -411,11 +158,11 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
*/
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
- const int tuple_sz = sizeof(struct sd_dif_tuple);
+ const int tuple_sz = sizeof(struct t10_pi_tuple);
struct scsi_disk *sdkp;
struct bio *bio;
- struct sd_dif_tuple *sdt;
- unsigned int j, sectors, sector_sz;
+ struct t10_pi_tuple *pi;
+ unsigned int j, intervals;
u32 phys, virt;
sdkp = scsi_disk(scmd->request->rq_disk);
@@ -423,39 +170,35 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
return;
- sector_sz = scmd->device->sector_size;
- sectors = good_bytes / sector_sz;
-
- phys = blk_rq_pos(scmd->request) & 0xffffffff;
- if (sector_sz == 4096)
- phys >>= 3;
+ intervals = good_bytes / scsi_prot_interval(scmd);
+ phys = scsi_prot_ref_tag(scmd);
__rq_for_each_bio(bio, scmd->request) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_vec iv;
struct bvec_iter iter;
- virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
+ virt = bip_get_seed(bip) & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, iter) {
- sdt = kmap_atomic(iv.bv_page)
- + iv.bv_offset;
+ bip_for_each_vec(iv, bip, iter) {
+ pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
- for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
- if (sectors == 0) {
- kunmap_atomic(sdt);
+ if (intervals == 0) {
+ kunmap_atomic(pi);
return;
}
- if (be32_to_cpu(sdt->ref_tag) == phys)
- sdt->ref_tag = cpu_to_be32(virt);
+ if (be32_to_cpu(pi->ref_tag) == phys)
+ pi->ref_tag = cpu_to_be32(virt);
virt++;
phys++;
- sectors--;
+ intervals--;
}
- kunmap_atomic(sdt);
+ kunmap_atomic(pi);
}
}
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 01cf88888797..60354449d9ed 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1711,9 +1711,9 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
}
rq = blk_get_request(q, rw, GFP_ATOMIC);
- if (!rq) {
+ if (IS_ERR(rq)) {
kfree(long_cmdp);
- return -ENOMEM;
+ return PTR_ERR(rq);
}
blk_rq_set_block_pc(rq);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d3fd6e8fb378..4daa372ed381 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -490,7 +490,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
req = blk_get_request(SRpnt->stp->device->request_queue, write,
GFP_KERNEL);
- if (!req)
+ if (IS_ERR(req))
return DRIVER_ERROR << 24;
blk_rq_set_block_pc(req);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index eee1bc0b506e..b83846fc7859 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -110,6 +110,9 @@ struct virtio_scsi {
/* CPU hotplug notifier */
struct notifier_block nb;
+ /* Protected by event_vq lock */
+ bool stop_events;
+
struct virtio_scsi_vq ctrl_vq;
struct virtio_scsi_vq event_vq;
struct virtio_scsi_vq req_vqs[];
@@ -303,6 +306,11 @@ static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
int i;
+ /* Stop scheduling work before calling cancel_work_sync. */
+ spin_lock_irq(&vscsi->event_vq.vq_lock);
+ vscsi->stop_events = true;
+ spin_unlock_irq(&vscsi->event_vq.vq_lock);
+
for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
cancel_work_sync(&vscsi->event_list[i].work);
}
@@ -390,7 +398,8 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_event_node *event_node = buf;
- schedule_work(&event_node->work);
+ if (!vscsi->stop_events)
+ queue_work(system_freezable_wq, &event_node->work);
}
static void virtscsi_event_done(struct virtqueue *vq)
@@ -851,13 +860,6 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
virtscsi_vq->vq = vq;
}
-static void virtscsi_scan(struct virtio_device *vdev)
-{
- struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
-
- scsi_scan_host(shost);
-}
-
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
struct Scsi_Host *sh = virtio_scsi_host(vdev);
@@ -916,9 +918,6 @@ static int virtscsi_init(struct virtio_device *vdev,
virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
- if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
- virtscsi_kick_event_all(vscsi);
-
err = 0;
out:
@@ -997,10 +996,13 @@ static int virtscsi_probe(struct virtio_device *vdev)
err = scsi_add_host(shost, &vdev->dev);
if (err)
goto scsi_add_host_failed;
- /*
- * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
- * after VIRTIO_CONFIG_S_DRIVER_OK has been set..
- */
+
+ virtio_device_ready(vdev);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
+
+ scsi_scan_host(shost);
return 0;
scsi_add_host_failed:
@@ -1048,8 +1050,15 @@ static int virtscsi_restore(struct virtio_device *vdev)
return err;
err = register_hotcpu_notifier(&vscsi->nb);
- if (err)
+ if (err) {
vdev->config->del_vqs(vdev);
+ return err;
+ }
+
+ virtio_device_ready(vdev);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
return err;
}
@@ -1073,7 +1082,6 @@ static struct virtio_driver virtio_scsi_driver = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtscsi_probe,
- .scan = virtscsi_scan,
#ifdef CONFIG_PM_SLEEP
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
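
The stop_events flag added above closes a race: without it, the event virtqueue callback could schedule new event work after virtscsi_cancel_event_work() has already run cancel_work_sync(). The generic shape of the pattern, sketched with placeholder names:

	/* teardown path */
	spin_lock_irq(&evt_lock);
	stop_events = true;		/* no new work from here on */
	spin_unlock_irq(&evt_lock);
	for (i = 0; i < nr_events; i++)
		cancel_work_sync(&event[i].work);

	/* completion callback, runs under evt_lock */
	if (!stop_events)
		queue_work(system_freezable_wq, &node->work);
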
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index ce4588851274..ee16f0c5c47d 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -32,7 +32,6 @@
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
-#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
/*
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
new file mode 100644
index 000000000000..34199d206ba6
--- /dev/null
+++ b/drivers/scsi/xen-scsifront.c
@@ -0,0 +1,1026 @@
+/*
+ * Xen SCSI frontend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+#include <xen/interface/io/protocols.h>
+
+#include <asm/xen/hypervisor.h>
+
+
+#define GRANT_INVALID_REF 0
+
+#define VSCSIFRONT_OP_ADD_LUN 1
+#define VSCSIFRONT_OP_DEL_LUN 2
+
+/* Tuning point. */
+#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
+#define VSCSIIF_MAX_TARGET 64
+#define VSCSIIF_MAX_LUN 255
+
+#define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
+#define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE
+
+#define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \
+ sizeof(struct scsiif_request_segment)))
+
+struct vscsifrnt_shadow {
+ /* command between backend and frontend */
+ unsigned char act;
+ uint16_t rqid;
+
+ unsigned int nr_grants; /* number of grants in gref[] */
+ struct scsiif_request_segment *sg; /* scatter/gather elements */
+
+ /* Do reset or abort function. */
+ wait_queue_head_t wq_reset; /* reset work queue */
+ int wait_reset; /* reset work queue condition */
+ int32_t rslt_reset; /* reset response status: */
+ /* SUCCESS or FAILED or: */
+#define RSLT_RESET_WAITING 0
+#define RSLT_RESET_ERR -1
+
+ /* Requested struct scsi_cmnd is stored from kernel. */
+ struct scsi_cmnd *sc;
+ int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
+};
+
+struct vscsifrnt_info {
+ struct xenbus_device *dev;
+
+ struct Scsi_Host *host;
+ int host_active;
+
+ unsigned int evtchn;
+ unsigned int irq;
+
+ grant_ref_t ring_ref;
+ struct vscsiif_front_ring ring;
+ struct vscsiif_response ring_rsp;
+
+ spinlock_t shadow_lock;
+ DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
+ struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];
+
+ wait_queue_head_t wq_sync;
+ unsigned int wait_ring_available:1;
+
+ char dev_state_path[64];
+ struct task_struct *curr;
+};
+
+static DEFINE_MUTEX(scsifront_mutex);
+
+static void scsifront_wake_up(struct vscsifrnt_info *info)
+{
+ info->wait_ring_available = 0;
+ wake_up(&info->wq_sync);
+}
+
+static int scsifront_get_rqid(struct vscsifrnt_info *info)
+{
+ unsigned long flags;
+ int free;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+
+ free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+ __clear_bit(free, info->shadow_free_bitmap);
+
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ return free;
+}
+
+static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
+{
+ int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+
+ __set_bit(id, info->shadow_free_bitmap);
+ info->shadow[id] = NULL;
+
+ return empty || info->wait_ring_available;
+}
+
+static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
+{
+ unsigned long flags;
+ int kick;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+ kick = _scsifront_put_rqid(info, id);
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ if (kick)
+ scsifront_wake_up(info);
+}
+
+static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+{
+ struct vscsiif_front_ring *ring = &(info->ring);
+ struct vscsiif_request *ring_req;
+ uint32_t id;
+
+ id = scsifront_get_rqid(info); /* use id in response */
+ if (id >= VSCSIIF_MAX_REQS)
+ return NULL;
+
+ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+
+ ring->req_prod_pvt++;
+
+ ring_req->rqid = (uint16_t)id;
+
+ return ring_req;
+}
+
+static void scsifront_do_request(struct vscsifrnt_info *info)
+{
+ struct vscsiif_front_ring *ring = &(info->ring);
+ int notify;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+}
+
+static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+{
+ struct vscsifrnt_shadow *s = info->shadow[id];
+ int i;
+
+ if (s->sc->sc_data_direction == DMA_NONE)
+ return;
+
+ for (i = 0; i < s->nr_grants; i++) {
+ if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+ shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
+ "grant still in use by backend\n");
+ BUG();
+ }
+ gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+ }
+
+ kfree(s->sg);
+}
+
+static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ struct scsi_cmnd *sc;
+ uint32_t id;
+ uint8_t sense_len;
+
+ id = ring_rsp->rqid;
+ sc = info->shadow[id]->sc;
+
+ BUG_ON(sc == NULL);
+
+ scsifront_gnttab_done(info, id);
+ scsifront_put_rqid(info, id);
+
+ sc->result = ring_rsp->rslt;
+ scsi_set_resid(sc, ring_rsp->residual_len);
+
+ sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
+ ring_rsp->sense_len);
+
+ if (sense_len)
+ memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);
+
+ sc->scsi_done(sc);
+}
+
+static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ uint16_t id = ring_rsp->rqid;
+ unsigned long flags;
+ struct vscsifrnt_shadow *shadow = info->shadow[id];
+ int kick;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+ shadow->wait_reset = 1;
+ switch (shadow->rslt_reset) {
+ case RSLT_RESET_WAITING:
+ shadow->rslt_reset = ring_rsp->rslt;
+ break;
+ case RSLT_RESET_ERR:
+ kick = _scsifront_put_rqid(info, id);
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+ kfree(shadow);
+ if (kick)
+ scsifront_wake_up(info);
+ return;
+ default:
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "bad reset state %d, possibly leaking %u\n",
+ shadow->rslt_reset, id);
+ break;
+ }
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ wake_up(&shadow->wq_reset);
+}
+
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+ struct vscsiif_response *ring_rsp;
+ RING_IDX i, rp;
+ int more_to_do = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(info->host->host_lock, flags);
+
+ rp = info->ring.sring->rsp_prod;
+	rmb();	/* ordering required with respect to dom0 */
+ for (i = info->ring.rsp_cons; i != rp; i++) {
+
+ ring_rsp = RING_GET_RESPONSE(&info->ring, i);
+
+ if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
+ "illegal rqid %u returned by backend!\n",
+ ring_rsp->rqid))
+ continue;
+
+ if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ scsifront_cdb_cmd_done(info, ring_rsp);
+ else
+ scsifront_sync_cmd_done(info, ring_rsp);
+ }
+
+ info->ring.rsp_cons = i;
+
+ if (i != info->ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+ else
+ info->ring.sring->rsp_event = i + 1;
+
+ info->wait_ring_available = 0;
+
+ spin_unlock_irqrestore(info->host->host_lock, flags);
+
+ wake_up(&info->wq_sync);
+
+ return more_to_do;
+}
+
+static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
+{
+ struct vscsifrnt_info *info = dev_id;
+
+ while (scsifront_cmd_done(info))
+ /* Yield point for this unbounded loop. */
+ cond_resched();
+
+ return IRQ_HANDLED;
+}
+
+static int map_data_for_request(struct vscsifrnt_info *info,
+ struct scsi_cmnd *sc,
+ struct vscsiif_request *ring_req,
+ struct vscsifrnt_shadow *shadow)
+{
+ grant_ref_t gref_head;
+ struct page *page;
+ int err, ref, ref_cnt = 0;
+ int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
+ unsigned int i, off, len, bytes;
+ unsigned int data_len = scsi_bufflen(sc);
+ unsigned int data_grants = 0, seg_grants = 0;
+ struct scatterlist *sg;
+ unsigned long mfn;
+ struct scsiif_request_segment *seg;
+
+ ring_req->nr_segments = 0;
+ if (sc->sc_data_direction == DMA_NONE || !data_len)
+ return 0;
+
+ scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
+ data_grants += PFN_UP(sg->offset + sg->length);
+
+ if (data_grants > VSCSIIF_SG_TABLESIZE) {
+ if (data_grants > info->host->sg_tablesize) {
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "Unable to map request_buffer for command!\n");
+ return -E2BIG;
+ }
+ seg_grants = vscsiif_grants_sg(data_grants);
+ shadow->sg = kcalloc(data_grants,
+ sizeof(struct scsiif_request_segment), GFP_ATOMIC);
+ if (!shadow->sg)
+ return -ENOMEM;
+ }
+ seg = shadow->sg ? : ring_req->seg;
+
+ err = gnttab_alloc_grant_references(seg_grants + data_grants,
+ &gref_head);
+ if (err) {
+ kfree(shadow->sg);
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "gnttab_alloc_grant_references() error\n");
+ return -ENOMEM;
+ }
+
+ if (seg_grants) {
+ page = virt_to_page(seg);
+ off = (unsigned long)seg & ~PAGE_MASK;
+ len = sizeof(struct scsiif_request_segment) * data_grants;
+ while (len > 0) {
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ info->dev->otherend_id, mfn, 1);
+ shadow->gref[ref_cnt] = ref;
+ ring_req->seg[ref_cnt].gref = ref;
+ ring_req->seg[ref_cnt].offset = (uint16_t)off;
+ ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+
+ page++;
+ len -= bytes;
+ off = 0;
+ ref_cnt++;
+ }
+ BUG_ON(seg_grants < ref_cnt);
+ seg_grants = ref_cnt;
+ }
+
+ scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
+ page = sg_page(sg);
+ off = sg->offset;
+ len = sg->length;
+
+ while (len > 0 && data_len > 0) {
+ /*
+ * sg sends a scatterlist that is larger than
+ * the data_len it wants transferred for certain
+ * IO sizes.
+ */
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+ bytes = min(bytes, data_len);
+
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ info->dev->otherend_id, mfn, grant_ro);
+
+ shadow->gref[ref_cnt] = ref;
+ seg->gref = ref;
+ seg->offset = (uint16_t)off;
+ seg->length = (uint16_t)bytes;
+
+ page++;
+ seg++;
+ len -= bytes;
+ data_len -= bytes;
+ off = 0;
+ ref_cnt++;
+ }
+ }
+
+ if (seg_grants)
+ ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+ else
+ ring_req->nr_segments = (uint8_t)ref_cnt;
+ shadow->nr_grants = ref_cnt;
+
+ return 0;
+}
+
+static struct vscsiif_request *scsifront_command2ring(
+ struct vscsifrnt_info *info, struct scsi_cmnd *sc,
+ struct vscsifrnt_shadow *shadow)
+{
+ struct vscsiif_request *ring_req;
+
+ memset(shadow, 0, sizeof(*shadow));
+
+ ring_req = scsifront_pre_req(info);
+ if (!ring_req)
+ return NULL;
+
+ info->shadow[ring_req->rqid] = shadow;
+ shadow->rqid = ring_req->rqid;
+
+ ring_req->id = sc->device->id;
+ ring_req->lun = sc->device->lun;
+ ring_req->channel = sc->device->channel;
+ ring_req->cmd_len = sc->cmd_len;
+
+ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+ ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+ return ring_req;
+}
+
+static int scsifront_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *sc)
+{
+ struct vscsifrnt_info *info = shost_priv(shost);
+ struct vscsiif_request *ring_req;
+ struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
+ unsigned long flags;
+ int err;
+ uint16_t rqid;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (RING_FULL(&info->ring))
+ goto busy;
+
+ ring_req = scsifront_command2ring(info, sc, shadow);
+ if (!ring_req)
+ goto busy;
+
+ sc->result = 0;
+
+ rqid = ring_req->rqid;
+ ring_req->act = VSCSIIF_ACT_SCSI_CDB;
+
+ shadow->sc = sc;
+ shadow->act = VSCSIIF_ACT_SCSI_CDB;
+
+ err = map_data_for_request(info, sc, ring_req, shadow);
+ if (err < 0) {
+ pr_debug("%s: err %d\n", __func__, err);
+ scsifront_put_rqid(info, rqid);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (err == -ENOMEM)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ sc->result = DID_ERROR << 16;
+ sc->scsi_done(sc);
+ return 0;
+ }
+
+ scsifront_do_request(info);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return 0;
+
+busy:
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ pr_debug("%s: busy\n", __func__);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/*
+ * Any exception handling (reset or abort) must be forwarded to the backend.
+ * We have to wait until an answer is returned. This answer contains the
+ * result to be returned to the requestor.
+ */
+static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
+{
+ struct Scsi_Host *host = sc->device->host;
+ struct vscsifrnt_info *info = shost_priv(host);
+ struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
+ struct vscsiif_request *ring_req;
+ int err = 0;
+
+ shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+ if (!shadow)
+ return FAILED;
+
+ spin_lock_irq(host->host_lock);
+
+ for (;;) {
+ if (!RING_FULL(&info->ring)) {
+ ring_req = scsifront_command2ring(info, sc, shadow);
+ if (ring_req)
+ break;
+ }
+ if (err) {
+ spin_unlock_irq(host->host_lock);
+ kfree(shadow);
+ return FAILED;
+ }
+ info->wait_ring_available = 1;
+ spin_unlock_irq(host->host_lock);
+ err = wait_event_interruptible(info->wq_sync,
+ !info->wait_ring_available);
+ spin_lock_irq(host->host_lock);
+ }
+
+ ring_req->act = act;
+ ring_req->ref_rqid = s->rqid;
+
+ shadow->act = act;
+ shadow->rslt_reset = RSLT_RESET_WAITING;
+ init_waitqueue_head(&shadow->wq_reset);
+
+ ring_req->nr_segments = 0;
+
+ scsifront_do_request(info);
+
+ spin_unlock_irq(host->host_lock);
+ err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
+ spin_lock_irq(host->host_lock);
+
+ if (!err) {
+ err = shadow->rslt_reset;
+ scsifront_put_rqid(info, shadow->rqid);
+ kfree(shadow);
+ } else {
+ spin_lock(&info->shadow_lock);
+ shadow->rslt_reset = RSLT_RESET_ERR;
+ spin_unlock(&info->shadow_lock);
+ err = FAILED;
+ }
+
+ spin_unlock_irq(host->host_lock);
+ return err;
+}
+
+static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
+{
+ pr_debug("%s\n", __func__);
+ return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
+}
+
+static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
+{
+ pr_debug("%s\n", __func__);
+ return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
+}
+
+static int scsifront_sdev_configure(struct scsi_device *sdev)
+{
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
+
+ if (info && current == info->curr)
+ xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateConnected);
+
+ return 0;
+}
+
+static void scsifront_sdev_destroy(struct scsi_device *sdev)
+{
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
+
+ if (info && current == info->curr)
+ xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateClosed);
+}
+
+static struct scsi_host_template scsifront_sht = {
+ .module = THIS_MODULE,
+ .name = "Xen SCSI frontend driver",
+ .queuecommand = scsifront_queuecommand,
+ .eh_abort_handler = scsifront_eh_abort_handler,
+ .eh_device_reset_handler = scsifront_dev_reset_handler,
+ .slave_configure = scsifront_sdev_configure,
+ .slave_destroy = scsifront_sdev_destroy,
+ .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
+ .can_queue = VSCSIIF_MAX_REQS,
+ .this_id = -1,
+ .cmd_size = sizeof(struct vscsifrnt_shadow),
+ .sg_tablesize = VSCSIIF_SG_TABLESIZE,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "scsifront",
+};
+
+static int scsifront_alloc_ring(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct vscsiif_sring *sring;
+ int err = -ENOMEM;
+
+ /***** Frontend to Backend ring start *****/
+ sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
+ if (!sring) {
+ xenbus_dev_fatal(dev, err,
+ "fail to allocate shared ring (Front to Back)");
+ return err;
+ }
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(sring));
+ if (err < 0) {
+ free_page((unsigned long)sring);
+ xenbus_dev_fatal(dev, err,
+			"failed to grant shared ring (Front to Back)");
+ return err;
+ }
+ info->ring_ref = err;
+
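+	/* Allocate an event channel and handle it from a threaded irq. */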
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
+ goto free_gnttab;
+ }
+
+ err = bind_evtchn_to_irq(info->evtchn);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
+ goto free_gnttab;
+ }
+
+ info->irq = err;
+
+ err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
+ IRQF_ONESHOT, "scsifront", info);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "request_threaded_irq");
+ goto free_irq;
+ }
+
+ return 0;
+
+/* Error path: release the resources allocated above. */
+free_irq:
+ unbind_from_irqhandler(info->irq, info);
+free_gnttab:
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+
+ return err;
+}
+
+static int scsifront_init_ring(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct xenbus_transaction xbt;
+ int err;
+
+ pr_debug("%s\n", __func__);
+
+ err = scsifront_alloc_ring(info);
+ if (err)
+ return err;
+ pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);
+
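+	/*
+	 * Publish ring-ref and event-channel in a single xenbus
+	 * transaction; -EAGAIN means the transaction clashed with a
+	 * concurrent one and has to be restarted.
+	 */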
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err)
+ xenbus_dev_fatal(dev, err, "starting transaction");
+
+ err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
+ info->ring_ref);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ info->evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
+ goto fail;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto free_sring;
+ }
+
+ return 0;
+
+fail:
+ xenbus_transaction_end(xbt, 1);
+free_sring:
+ unbind_from_irqhandler(info->irq, info);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+
+ return err;
+}
+
+static int scsifront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct vscsifrnt_info *info;
+ struct Scsi_Host *host;
+ int err = -ENOMEM;
+
+ host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
+ if (!host) {
+		xenbus_dev_fatal(dev, err, "failed to allocate scsi host");
+ return err;
+ }
+ info = (struct vscsifrnt_info *)host->hostdata;
+
+ dev_set_drvdata(&dev->dev, info);
+ info->dev = dev;
+
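+	/* Initially all shadow request ids are free. */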
+ bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+
+ err = scsifront_init_ring(info);
+ if (err) {
+ scsi_host_put(host);
+ return err;
+ }
+
+ init_waitqueue_head(&info->wq_sync);
+ spin_lock_init(&info->shadow_lock);
+
+ host->max_id = VSCSIIF_MAX_TARGET;
+ host->max_channel = 0;
+ host->max_lun = VSCSIIF_MAX_LUN;
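+	/* One SG entry is held back as slack for unaligned buffers. */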
+ host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
+ host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(host, &dev->dev);
+ if (err) {
+		dev_err(&dev->dev, "failed to add scsi host: %d\n", err);
+ goto free_sring;
+ }
+ info->host = host;
+ info->host_active = 1;
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+free_sring:
+ unbind_from_irqhandler(info->irq, info);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ scsi_host_put(host);
+ return err;
+}
+
+static int scsifront_remove(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+ pr_debug("%s: %s removed\n", __func__, dev->nodename);
+
+ mutex_lock(&scsifront_mutex);
+ if (info->host_active) {
+ /* Scsi_host not yet removed */
+ scsi_remove_host(info->host);
+ info->host_active = 0;
+ }
+ mutex_unlock(&scsifront_mutex);
+
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ unbind_from_irqhandler(info->irq, info);
+
+ scsi_host_put(info->host);
+
+ return 0;
+}
+
+static void scsifront_disconnect(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct Scsi_Host *host = info->host;
+
+ pr_debug("%s: %s disconnect\n", __func__, dev->nodename);
+
+	/*
+	 * By the time this function runs, all devices of the frontend
+	 * have already been removed, so there is no need to block I/O
+	 * before calling scsi_remove_host().
+	 */
+
+ mutex_lock(&scsifront_mutex);
+ if (info->host_active) {
+ scsi_remove_host(host);
+ info->host_active = 0;
+ }
+ mutex_unlock(&scsifront_mutex);
+
+ xenbus_frontend_closed(dev);
+}
+
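+/*
+ * Walk the backend's "vscsi-devs" directory; per the vscsi xenstore
+ * protocol each entry carries
+ *   vscsi-devs/<entry>/v-dev  = "hst:chn:tgt:lun"
+ *   vscsi-devs/<entry>/state  = XenbusState value
+ * and add or remove the corresponding SCSI devices.
+ */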
+static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+{
+ struct xenbus_device *dev = info->dev;
+ int i, err = 0;
+ char str[64];
+ char **dir;
+ unsigned int dir_n = 0;
+ unsigned int device_state;
+ unsigned int hst, chn, tgt, lun;
+ struct scsi_device *sdev;
+
+ dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
+ if (IS_ERR(dir))
+ return;
+
+ /* mark current task as the one allowed to modify device states */
+ BUG_ON(info->curr);
+ info->curr = current;
+
+ for (i = 0; i < dir_n; i++) {
+ /* read status */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
+ err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
+ &device_state);
+ if (XENBUS_EXIST_ERR(err))
+ continue;
+
+ /* virtual SCSI device */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
+ err = xenbus_scanf(XBT_NIL, dev->otherend, str,
+ "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
+ if (XENBUS_EXIST_ERR(err))
+ continue;
+
+		/*
+		 * Front device state path, used in slave_configure called
+		 * on successful scsi_add_device, and in slave_destroy called
+		 * on removal of a device.
+		 */
+ snprintf(info->dev_state_path, sizeof(info->dev_state_path),
+ "vscsi-devs/%s/state", dir[i]);
+
+ switch (op) {
+ case VSCSIFRONT_OP_ADD_LUN:
+ if (device_state != XenbusStateInitialised)
+ break;
+
+ if (scsi_add_device(info->host, chn, tgt, lun)) {
+				dev_err(&dev->dev, "scsi_add_device failed\n");
+ xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateClosed);
+ }
+ break;
+ case VSCSIFRONT_OP_DEL_LUN:
+ if (device_state != XenbusStateClosing)
+ break;
+
+ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ info->curr = NULL;
+
+ kfree(dir);
+}
+
+static void scsifront_read_backend_params(struct xenbus_device *dev,
+ struct vscsifrnt_info *info)
+{
+ unsigned int sg_grant;
+ int ret;
+ struct Scsi_Host *host = info->host;
+
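+	/*
+	 * "feature-sg-grant" advertises how many grants per request the
+	 * backend supports.  Clamp it between the default table size and
+	 * SG_ALL, and to the number of segment descriptors that fit into
+	 * the granted pages.
+	 */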
+ ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
+ &sg_grant);
+ if (ret == 1 && sg_grant) {
+ sg_grant = min_t(unsigned int, sg_grant, SG_ALL);
+ sg_grant = max_t(unsigned int, sg_grant, VSCSIIF_SG_TABLESIZE);
+ host->sg_tablesize = min_t(unsigned int, sg_grant,
+ VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
+ sizeof(struct scsiif_request_segment));
+ host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
+ }
+ dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize);
+}
+
+static void scsifront_backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+ pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);
+
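+	/*
+	 * Map backend state changes onto frontend actions: Connected
+	 * triggers the initial LUN scan, Reconfiguring/Reconfigured
+	 * handle LUN hot-unplug/hotplug, and Closing/Closed tear the
+	 * host down.
+	 */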
+ switch (backend_state) {
+ case XenbusStateUnknown:
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateConnected:
+ scsifront_read_backend_params(dev, info);
+ if (xenbus_read_driver_state(dev->nodename) ==
+ XenbusStateInitialised)
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+
+ if (dev->state != XenbusStateConnected)
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
+ case XenbusStateClosing:
+ scsifront_disconnect(info);
+ break;
+
+ case XenbusStateReconfiguring:
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
+ xenbus_switch_state(dev, XenbusStateReconfiguring);
+ break;
+
+ case XenbusStateReconfigured:
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+ }
+}
+
+static const struct xenbus_device_id scsifront_ids[] = {
+ { "vscsi" },
+ { "" }
+};
+
+static struct xenbus_driver scsifront_driver = {
+ .ids = scsifront_ids,
+ .probe = scsifront_probe,
+ .remove = scsifront_remove,
+ .otherend_changed = scsifront_backend_changed,
+};
+
+static int __init scsifront_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ return xenbus_register_frontend(&scsifront_driver);
+}
+module_init(scsifront_init);
+
+static void __exit scsifront_exit(void)
+{
+ xenbus_unregister_driver(&scsifront_driver);
+}
+module_exit(scsifront_exit);
+
+MODULE_DESCRIPTION("Xen SCSI frontend driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vscsi");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");