Diffstat (limited to 'drivers/s390')

-rw-r--r--  drivers/s390/char/hmcdrv_dev.c  |  16
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c   |  11
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c   | 203
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c    |   6
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c    |  32
-rw-r--r--  drivers/s390/scsi/zfcp_def.h    |   3
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c    |   7
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h    |   1
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c     |  52
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h     |  14
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c    |   3
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c   |  26
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c  |  66

13 files changed, 329 insertions, 111 deletions
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index 0c5176179c17..43cee7fcd01c 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -136,8 +136,7 @@ static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
if (rc)
module_put(THIS_MODULE);
- pr_debug("open file '/dev/%s' with return code %d\n",
- fp->f_dentry->d_name.name, rc);
+ pr_debug("open file '/dev/%pD' with return code %d\n", fp, rc);
return rc;
}
@@ -146,7 +145,7 @@ static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
*/
static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
{
- pr_debug("closing file '/dev/%s'\n", fp->f_dentry->d_name.name);
+ pr_debug("closing file '/dev/%pD'\n", fp);
kfree(fp->private_data);
fp->private_data = NULL;
hmcdrv_ftp_shutdown();
@@ -231,8 +230,8 @@ static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
*pos, ubuf, len);
- pr_debug("read from file '/dev/%s' at %lld returns %zd/%zu\n",
- fp->f_dentry->d_name.name, (long long) *pos, retlen, len);
+ pr_debug("read from file '/dev/%pD' at %lld returns %zd/%zu\n",
+ fp, (long long) *pos, retlen, len);
if (retlen > 0)
*pos += retlen;
@@ -248,8 +247,8 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
{
ssize_t retlen;
- pr_debug("writing file '/dev/%s' at pos. %lld with length %zd\n",
- fp->f_dentry->d_name.name, (long long) *pos, len);
+ pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
+ fp, (long long) *pos, len);
if (!fp->private_data) { /* first expect a cmd write */
fp->private_data = kmalloc(len + 1, GFP_KERNEL);
@@ -272,8 +271,7 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
if (retlen > 0)
*pos += retlen;
- pr_debug("write to file '/dev/%s' returned %zd\n",
- fp->f_dentry->d_name.name, retlen);
+ pr_debug("write to file '/dev/%pD' returned %zd\n", fp, retlen);
return retlen;
}
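
The hmcdrv hunks above replace manual fp->f_dentry->d_name.name dereferences with the %pD printk format extension, which takes a struct file pointer and prints the file's name (the related %pd does the same for a struct dentry). A minimal kernel-style sketch of the idiom; it assumes kernel context and the helper name log_open() is hypothetical, not part of the patch:

#include <linux/fs.h>
#include <linux/printk.h>

/* minimal sketch: %pD resolves the name from the struct file itself */
static void log_open(struct file *fp, int rc)
{
	pr_debug("open file '/dev/%pD' with return code %d\n", fp, rc);
}
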
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 643129070c51..dd65c8b4c7fe 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -80,7 +80,7 @@ static unsigned desc_size(const struct kvm_device_desc *desc)
}
/* This gets the device's feature bits. */
-static u32 kvm_get_features(struct virtio_device *vdev)
+static u64 kvm_get_features(struct virtio_device *vdev)
{
unsigned int i;
u32 features = 0;
@@ -93,7 +93,7 @@ static u32 kvm_get_features(struct virtio_device *vdev)
return features;
}
-static void kvm_finalize_features(struct virtio_device *vdev)
+static int kvm_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
@@ -103,12 +103,17 @@ static void kvm_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
memset(out_features, 0, desc->feature_len);
bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++) {
- if (test_bit(i, vdev->features))
+ if (__virtio_test_bit(vdev, i))
out_features[i / 8] |= (1 << (i % 8));
}
+
+ return 0;
}
/*
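
The kvm_virtio changes above track the core virtio switch to 64-bit feature words: get_features now returns u64, finalize_features returns an error code, and individual bits are read through __virtio_test_bit() before being packed into the legacy per-device feature byte array. As a standalone illustration (plain C, not the kernel code), this is the bit-to-byte packing the loop performs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack the low feature_len*8 bits of a 64-bit feature mask into a
 * little-endian byte array, the same layout as out_features[] above. */
static void pack_features(uint64_t features, uint8_t *out, unsigned feature_len)
{
	unsigned bits = feature_len * 8;
	unsigned i;

	if (bits > 64)
		bits = 64;
	memset(out, 0, feature_len);
	for (i = 0; i < bits; i++)
		if (features & (1ULL << i))
			out[i / 8] |= 1 << (i % 8);
}

int main(void)
{
	uint8_t out[4];

	pack_features(0x30000001ULL, out, sizeof(out));
	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	/* prints: 01 00 00 30 */
	return 0;
}
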
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index bda52f18e967..71d7802aa8b4 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -55,6 +55,7 @@ struct virtio_ccw_device {
struct ccw_device *cdev;
__u32 curr_io;
int err;
+ unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
struct list_head virtqueues;
@@ -67,13 +68,22 @@ struct virtio_ccw_device {
void *airq_info;
};
-struct vq_info_block {
+struct vq_info_block_legacy {
__u64 queue;
__u32 align;
__u16 index;
__u16 num;
} __packed;
+struct vq_info_block {
+ __u64 desc;
+ __u32 res0;
+ __u16 index;
+ __u16 num;
+ __u64 avail;
+ __u64 used;
+} __packed;
+
struct virtio_feature_desc {
__u32 features;
__u8 index;
@@ -86,11 +96,23 @@ struct virtio_thinint_area {
u8 isc;
} __packed;
+struct virtio_rev_info {
+ __u16 revision;
+ __u16 length;
+ __u8 data[];
+};
+
+/* the highest virtio-ccw revision we support */
+#define VIRTIO_CCW_REV_MAX 1
+
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
void *queue;
- struct vq_info_block *info_block;
+ union {
+ struct vq_info_block s;
+ struct vq_info_block_legacy l;
+ } *info_block;
int bit_nr;
struct list_head node;
long cookie;
@@ -122,6 +144,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73
+#define CCW_CMD_SET_VIRTIO_REV 0x83
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
@@ -134,6 +157,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
+#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -399,13 +423,22 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
spin_unlock_irqrestore(&vcdev->lock, flags);
/* Release from host. */
- info->info_block->queue = 0;
- info->info_block->align = 0;
- info->info_block->index = index;
- info->info_block->num = 0;
+ if (vcdev->revision == 0) {
+ info->info_block->l.queue = 0;
+ info->info_block->l.align = 0;
+ info->info_block->l.index = index;
+ info->info_block->l.num = 0;
+ ccw->count = sizeof(info->info_block->l);
+ } else {
+ info->info_block->s.desc = 0;
+ info->info_block->s.index = index;
+ info->info_block->s.num = 0;
+ info->info_block->s.avail = 0;
+ info->info_block->s.used = 0;
+ ccw->count = sizeof(info->info_block->s);
+ }
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->count = sizeof(*info->info_block);
ccw->cda = (__u32)(unsigned long)(info->info_block);
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VQ | index);
@@ -488,13 +521,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
}
/* Register it with the host. */
- info->info_block->queue = (__u64)info->queue;
- info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
- info->info_block->index = i;
- info->info_block->num = info->num;
+ if (vcdev->revision == 0) {
+ info->info_block->l.queue = (__u64)info->queue;
+ info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
+ info->info_block->l.index = i;
+ info->info_block->l.num = info->num;
+ ccw->count = sizeof(info->info_block->l);
+ } else {
+ info->info_block->s.desc = (__u64)info->queue;
+ info->info_block->s.index = i;
+ info->info_block->s.num = info->num;
+ info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
+ info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+ ccw->count = sizeof(info->info_block->s);
+ }
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->count = sizeof(*info->info_block);
ccw->cda = (__u32)(unsigned long)(info->info_block);
err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
if (err) {
@@ -660,11 +702,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
kfree(ccw);
}
-static u32 virtio_ccw_get_features(struct virtio_device *vdev)
+static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct virtio_feature_desc *features;
- int ret, rc;
+ int ret;
+ u64 rc;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -677,7 +720,6 @@ static u32 virtio_ccw_get_features(struct virtio_device *vdev)
goto out_free;
}
/* Read the feature bits from the host. */
- /* TODO: Features > 32 bits */
features->index = 0;
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
@@ -691,46 +733,79 @@ static u32 virtio_ccw_get_features(struct virtio_device *vdev)
rc = le32_to_cpu(features->features);
+ if (vcdev->revision == 0)
+ goto out_free;
+
+ /* Read second half of the feature bits from the host. */
+ features->index = 1;
+ ccw->cmd_code = CCW_CMD_READ_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+ if (ret == 0)
+ rc |= (u64)le32_to_cpu(features->features) << 32;
+
out_free:
kfree(features);
kfree(ccw);
return rc;
}
-static void virtio_ccw_finalize_features(struct virtio_device *vdev)
+static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct virtio_feature_desc *features;
- int i;
struct ccw1 *ccw;
+ int ret;
+
+ if (vcdev->revision >= 1 &&
+ !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+ dev_err(&vdev->dev, "virtio: device uses revision 1 "
+ "but does not have VIRTIO_F_VERSION_1\n");
+ return -EINVAL;
+ }
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
- return;
+ return -ENOMEM;
features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
- if (!features)
+ if (!features) {
+ ret = -ENOMEM;
goto out_free;
-
+ }
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
- i++) {
- int highbits = i % 2 ? 32 : 0;
- features->index = i;
- features->features = cpu_to_le32(vdev->features[i / 2]
- >> highbits);
- /* Write the feature bits to the host. */
- ccw->cmd_code = CCW_CMD_WRITE_FEAT;
- ccw->flags = 0;
- ccw->count = sizeof(*features);
- ccw->cda = (__u32)(unsigned long)features;
- ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
- }
+ features->index = 0;
+ features->features = cpu_to_le32((u32)vdev->features);
+ /* Write the first half of the feature bits to the host. */
+ ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+ if (ret)
+ goto out_free;
+
+ if (vcdev->revision == 0)
+ goto out_free;
+
+ features->index = 1;
+ features->features = cpu_to_le32(vdev->features >> 32);
+ /* Write the second half of the feature bits to the host. */
+ ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+
out_free:
kfree(features);
kfree(ccw);
+
+ return ret;
}
static void virtio_ccw_get_config(struct virtio_device *vdev,
@@ -806,7 +881,9 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ u8 old_status = *vcdev->status;
struct ccw1 *ccw;
+ int ret;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
@@ -818,7 +895,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->flags = 0;
ccw->count = sizeof(status);
ccw->cda = (__u32)(unsigned long)vcdev->status;
- ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ /* Write failed? We assume status is unchanged. */
+ if (ret)
+ *vcdev->status = old_status;
kfree(ccw);
}
@@ -919,6 +999,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
case VIRTIO_CCW_DOING_RESET:
case VIRTIO_CCW_DOING_READ_VQ_CONF:
case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
+ case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
vcdev->curr_io &= ~activity;
wake_up(&vcdev->wait_q);
break;
@@ -1034,6 +1115,51 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
return 0;
}
+static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
+{
+ struct virtio_rev_info *rev;
+ struct ccw1 *ccw;
+ int ret;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return -ENOMEM;
+ rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+ if (!rev) {
+ kfree(ccw);
+ return -ENOMEM;
+ }
+
+ /* Set transport revision */
+ ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
+ ccw->flags = 0;
+ ccw->count = sizeof(*rev);
+ ccw->cda = (__u32)(unsigned long)rev;
+
+ vcdev->revision = VIRTIO_CCW_REV_MAX;
+ do {
+ rev->revision = vcdev->revision;
+ /* none of our supported revisions carry payload */
+ rev->length = 0;
+ ret = ccw_io_helper(vcdev, ccw,
+ VIRTIO_CCW_DOING_SET_VIRTIO_REV);
+ if (ret == -EOPNOTSUPP) {
+ if (vcdev->revision == 0)
+ /*
+ * The host device does not support setting
+ * the revision: let's operate it in legacy
+ * mode.
+ */
+ ret = 0;
+ else
+ vcdev->revision--;
+ }
+ } while (ret == -EOPNOTSUPP);
+
+ kfree(ccw);
+ kfree(rev);
+ return ret;
+}
static int virtio_ccw_online(struct ccw_device *cdev)
{
@@ -1074,6 +1200,15 @@ static int virtio_ccw_online(struct ccw_device *cdev)
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
+
+ if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
+ vcdev->revision = 0;
+ } else {
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ goto out_free;
+ }
+
ret = register_virtio_device(&vcdev->vdev);
if (ret) {
dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
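
The largest part of the virtio_ccw.c change is revision negotiation: virtio_ccw_set_transport_rev() issues CCW_CMD_SET_VIRTIO_REV starting at VIRTIO_CCW_REV_MAX and steps down whenever the host answers -EOPNOTSUPP; a host that rejects even revision 0 is simply driven in legacy mode. A standalone sketch of that loop, with host_set_rev() as a hypothetical stand-in for the channel command:

#include <errno.h>
#include <stdio.h>

#define REV_MAX 1	/* mirrors VIRTIO_CCW_REV_MAX */

/* hypothetical stand-in: pretend the host only understands up to host_max */
static int host_set_rev(unsigned int rev, unsigned int host_max)
{
	return rev <= host_max ? 0 : -EOPNOTSUPP;
}

/* try the highest revision first, step down on -EOPNOTSUPP,
 * and treat rejection of revision 0 as "legacy host, keep 0" */
static int negotiate(unsigned int host_max, unsigned int *revision)
{
	int ret;

	*revision = REV_MAX;
	do {
		ret = host_set_rev(*revision, host_max);
		if (ret == -EOPNOTSUPP) {
			if (*revision == 0)
				ret = 0;	/* legacy mode */
			else
				(*revision)--;
		}
	} while (ret == -EOPNOTSUPP);
	return ret;
}

int main(void)
{
	unsigned int rev;

	negotiate(0, &rev);
	printf("legacy-only host -> revision %u\n", rev);	/* 0 */
	negotiate(1, &rev);
	printf("revision-1 host  -> revision %u\n", rev);	/* 1 */
	return 0;
}
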
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8004b071a9f2..01a73395a017 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -353,9 +353,11 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->ccw_device = ccw_device;
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
- INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+ INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
+ adapter->next_port_scan = jiffies;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -420,7 +422,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
{
struct ccw_device *cdev = adapter->ccw_device;
- cancel_work_sync(&adapter->scan_work);
+ cancel_delayed_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
cancel_work_sync(&adapter->ns_up_work);
zfcp_destroy_adapter_work_queue(adapter);
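
Converting scan_work from a work_struct to a delayed_work is what lets the later hunks defer port scans; setup, queueing and teardown all switch to the delayed_work variants, and the worker recovers its container via to_delayed_work(). A kernel-style sketch of the pattern, not a standalone program; the my_* names are illustrative only and not part of the patch:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct my_adapter {
	struct delayed_work scan_work;
	struct workqueue_struct *wq;
};

static void my_scan(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct my_adapter *adapter = container_of(dw, struct my_adapter,
						  scan_work);

	pr_debug("scanning adapter %p\n", adapter);
}

static void my_setup(struct my_adapter *adapter)
{
	INIT_DELAYED_WORK(&adapter->scan_work, my_scan);
	/* run after roughly one second instead of immediately */
	queue_delayed_work(adapter->wq, &adapter->scan_work,
			   msecs_to_jiffies(1000));
}

static void my_teardown(struct my_adapter *adapter)
{
	/* wait for a queued or running scan to finish before freeing */
	cancel_delayed_work_sync(&adapter->scan_work);
}
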
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index f9879d400d0e..54c7b48fdb46 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -56,8 +56,22 @@ static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
tag);
+
+ /*
+ * We want to scan ports here, with some random backoff and without
+ * rate limit. Recovery has already scheduled a port scan for us,
+ * but with both random delay and rate limit. Nevertheless we get
+ * what we want here by flushing the scheduled work after sleeping
+ * an equivalent random time.
+ * Let the port scan random delay elapse first. If recovery finishes
+ * up to that point in time, that would be perfect for both recovery
+ * and port scan. If not, i.e. recovery takes ages, there was no
+ * point in waiting a random delay on top of the time consumed by
+ * recovery.
+ */
+ msleep(zfcp_fc_port_scan_backoff());
zfcp_erp_wait(adapter);
- flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
+ flush_delayed_work(&adapter->scan_work);
zfcp_ccw_adapter_put(adapter);
@@ -162,11 +176,19 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
adapter->req_no = 0;
zfcp_ccw_activate(cdev, 0, "ccsonl1");
- /* scan for remote ports
- either at the end of any successful adapter recovery
- or only after the adapter recovery for setting a device online */
+
+ /*
+ * We want to scan ports here, always, with some random delay and
+ * without rate limit - basically what zfcp_ccw_activate() has
+ * achieved for us. Not quite! That port scan depended on
+ * !no_auto_port_rescan. So let's cover the no_auto_port_rescan
+ * case here to make sure a port scan is done unconditionally.
+ * Since zfcp_ccw_activate() has waited the desired random time,
+ * we can immediately schedule and flush a port scan for the
+ * remaining cases.
+ */
zfcp_fc_inverse_conditional_port_scan(adapter);
- flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
+ flush_delayed_work(&adapter->scan_work);
zfcp_ccw_adapter_put(adapter);
return 0;
}
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index d91173f326c5..b8e853e53546 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -186,12 +186,13 @@ struct zfcp_adapter {
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
- struct work_struct scan_work;
+ struct delayed_work scan_work;
struct work_struct ns_up_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
struct zfcp_fc_events events;
+ unsigned long next_port_scan;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c82fe65c4128..2c5d4567d1da 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -821,11 +821,6 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
return ZFCP_ERP_CONTINUES;
}
-static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
-{
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
-}
-
static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
@@ -833,7 +828,6 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
- zfcp_erp_port_strategy_clearstati(port);
if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
(status & ZFCP_STATUS_COMMON_OPEN))
return zfcp_erp_port_forced_strategy_close(erp_action);
@@ -933,7 +927,6 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
- zfcp_erp_port_strategy_clearstati(port);
if (p_status & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_port_strategy_close(erp_action);
break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index a9c570a09b85..5b500652572b 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -85,6 +85,7 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
+extern unsigned int zfcp_fc_port_scan_backoff(void);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ca28e1c66115..25d49f32ca63 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
+#include <linux/random.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
@@ -31,12 +32,54 @@ module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
"no automatic port_rescan (default off)");
+static unsigned int port_scan_backoff = 500;
+module_param(port_scan_backoff, uint, 0600);
+MODULE_PARM_DESC(port_scan_backoff,
+ "upper limit of port scan random backoff in msecs (default 500)");
+
+static unsigned int port_scan_ratelimit = 60000;
+module_param(port_scan_ratelimit, uint, 0600);
+MODULE_PARM_DESC(port_scan_ratelimit,
+ "minimum interval between port scans in msecs (default 60000)");
+
+unsigned int zfcp_fc_port_scan_backoff(void)
+{
+ if (!port_scan_backoff)
+ return 0;
+ return get_random_int() % port_scan_backoff;
+}
+
+static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
+{
+ unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
+ unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());
+
+ adapter->next_port_scan = jiffies + interval + backoff;
+}
+
+static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
+{
+ unsigned long now = jiffies;
+ unsigned long next = adapter->next_port_scan;
+ unsigned long delay = 0, max;
+
+ /* delay only needed within waiting period */
+ if (time_before(now, next)) {
+ delay = next - now;
+ /* paranoia: never ever delay scans longer than specified */
+ max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
+ delay = min(delay, max);
+ }
+
+ queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
+}
+
void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (no_auto_port_rescan)
return;
- queue_work(adapter->work_queue, &adapter->scan_work);
+ zfcp_fc_port_scan(adapter);
}
void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
@@ -44,7 +87,7 @@ void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
if (!no_auto_port_rescan)
return;
- queue_work(adapter->work_queue, &adapter->scan_work);
+ zfcp_fc_port_scan(adapter);
}
/**
@@ -680,12 +723,15 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
*/
void zfcp_fc_scan_ports(struct work_struct *work)
{
- struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ struct delayed_work *dw = to_delayed_work(work);
+ struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
scan_work);
int ret, i;
struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
+ zfcp_fc_port_scan_time(adapter);
+
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
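
The new helpers above implement "rate limit plus random backoff": after each scan, zfcp_fc_port_scan_time() records the earliest time the next scan may run (now + ratelimit + random backoff), and zfcp_fc_port_scan() only delays a requested scan while that window is still open, clamped to ratelimit + backoff as a safety net. A standalone illustration of the arithmetic in milliseconds, assuming the module-parameter defaults; the kernel code additionally uses time_before() to stay safe across jiffies wrap:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define RATELIMIT_MS 60000UL	/* mirrors port_scan_ratelimit default */
#define BACKOFF_MS     500UL	/* mirrors port_scan_backoff default */

static unsigned long next_scan;	/* earliest time the next scan may run */

static unsigned long scan_backoff(void)
{
	return BACKOFF_MS ? (unsigned long)rand() % BACKOFF_MS : 0;
}

/* called when a scan actually runs */
static void scan_time(unsigned long now)
{
	next_scan = now + RATELIMIT_MS + scan_backoff();
}

/* called when a new scan is requested; returns the delay to apply */
static unsigned long scan_delay(unsigned long now)
{
	unsigned long delay = 0, max = RATELIMIT_MS + BACKOFF_MS;

	if (now < next_scan) {		/* kernel code uses time_before() */
		delay = next_scan - now;
		if (delay > max)	/* paranoia clamp, as in the patch */
			delay = max;
	}
	return delay;
}

int main(void)
{
	srand((unsigned)time(NULL));
	scan_time(0);						/* scan ran at t=0 */
	printf("delay at t=10s: %lu ms\n", scan_delay(10000));	/* ~50000-50500 */
	printf("delay at t=70s: %lu ms\n", scan_delay(70000));	/* 0: window over */
	return 0;
}
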
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b1d2024ed513..df2b541c8287 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -212,8 +212,6 @@ static inline
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
u8 tm_flags)
{
- char tag[2];
-
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
if (unlikely(tm_flags)) {
@@ -221,17 +219,7 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
return;
}
- if (scsi_populate_tag_msg(scsi, tag)) {
- switch (tag[0]) {
- case MSG_ORDERED_TAG:
- fcp->fc_pri_ta |= FCP_PTA_ORDERED;
- break;
- case MSG_SIMPLE_TAG:
- fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
- break;
- };
- } else
- fcp->fc_pri_ta = FCP_PTA_SIMPLE;
+ fcp->fc_pri_ta = FCP_PTA_SIMPLE;
if (scsi->sc_data_direction == DMA_FROM_DEVICE)
fcp->fc_flags |= FCP_CFL_RDDATA;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0fe8d5d95119..21ec5e2f584c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1396,8 +1396,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
port->handle = header->port_handle;
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_COMMON_ACCESS_BOXED,
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
&port->status);
/* check whether D_ID has changed during open */
/*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7b353647cb90..75f4bfc2b98a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -32,25 +32,6 @@ static bool allow_lun_scan = 1;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
-static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
- int reason)
-{
- switch (reason) {
- case SCSI_QDEPTH_DEFAULT:
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
- break;
- case SCSI_QDEPTH_QFULL:
- scsi_track_queue_full(sdev, depth);
- break;
- case SCSI_QDEPTH_RAMP_UP:
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return sdev->queue_depth;
-}
-
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -66,9 +47,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
- scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
- else
- scsi_adjust_queue_depth(sdp, 0, 1);
+ scsi_change_queue_depth(sdp, default_depth);
return 0;
}
@@ -307,7 +286,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
- .change_queue_depth = zfcp_scsi_change_queue_depth,
+ .change_queue_depth = scsi_change_queue_depth,
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
@@ -322,6 +301,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.use_clustering = 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
+ .track_queue_depth = 1,
};
/**
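
The zfcp_scsi.c hunks above drop the driver-private change_queue_depth handler in favour of the midlayer's generic scsi_change_queue_depth(), and set .track_queue_depth so the core performs the queue-full tracking that the removed SCSI_QDEPTH_QFULL case used to handle. A kernel-style sketch of the resulting host-template pattern; the my_* names are illustrative only:

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

static int my_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_change_queue_depth(sdev, 32);	/* example default depth */
	return 0;
}

static struct scsi_host_template my_template = {
	.slave_configure	= my_slave_configure,
	/* the generic helper can be wired in directly */
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
	/* ... remaining fields as in the driver ... */
};
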
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 672b57219e11..96a0be13e841 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -73,9 +73,7 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
-ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
- (atomic_read(&port->status) &
- ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
zfcp_unit_sdev_status(unit));
@@ -223,9 +221,13 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
if (!adapter)
return -ENODEV;
- /* sync the user-space- with the kernel-invocation of scan_work */
- queue_work(adapter->work_queue, &adapter->scan_work);
- flush_work(&adapter->scan_work);
+ /*
+	 * User's wish is our command: immediately schedule and flush a
+ * worker to conduct a synchronous port scan, that is, neither
+ * a random delay nor a rate limit is applied here.
+ */
+ queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
+ flush_delayed_work(&adapter->scan_work);
zfcp_ccw_adapter_put(adapter);
return (ssize_t) count;
@@ -439,16 +441,15 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
- struct zfcp_port *port = zfcp_sdev->port; \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
- dev_name(&port->adapter->ccw_device->dev));
+ dev_name(&zfcp_sdev->port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
- (unsigned long long) port->wwpn);
+ (unsigned long long) zfcp_sdev->port->wwpn);
static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
struct device_attribute *attr,
@@ -460,6 +461,49 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
+ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n",
+ (atomic_read(&zfcp_sdev->status) &
+ ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+
+static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
+ unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+
+ return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || val != 0)
+ return -EINVAL;
+
+ zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "syufai3");
+ zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter);
+
+ return count;
+}
+static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO,
+ zfcp_sysfs_scsi_zfcp_failed_show,
+ zfcp_sysfs_scsi_zfcp_failed_store);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
+ (atomic_read(&zfcp_sdev->status) &
+ ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
+ atomic_read(&zfcp_sdev->status));
+
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
&dev_attr_wwpn,
@@ -467,6 +511,10 @@ struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_read_latency,
&dev_attr_write_latency,
&dev_attr_cmd_latency,
+ &dev_attr_zfcp_access_denied,
+ &dev_attr_zfcp_failed,
+ &dev_attr_zfcp_in_recovery,
+ &dev_attr_zfcp_status,
NULL
};