author    Herbert Xu <herbert@gondor.apana.org.au>  2010-05-26 05:36:58 +0400
committer Herbert Xu <herbert@gondor.apana.org.au>  2010-05-26 05:36:58 +0400
commit    50d1e9302bab7d35dae7146f8c468e0943015616 (patch)
tree      fa05320f4a297bd582686574cf94ba444e264b3f /drivers/s390
parent    7cc2835083aedfde42de02301005a5555e00c4b1 (diff)
parent    dc4ccfd15d4fc7a91ddf222bc5eed5cc4bcf10e6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/crypto-2.6
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 39
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 20
-rw-r--r--  drivers/s390/block/dasd_alias.c | 125
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 174
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 116
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 2
-rw-r--r--  drivers/s390/block/dasd_int.h | 49
-rw-r--r--  drivers/s390/char/Kconfig | 3
-rw-r--r--  drivers/s390/char/fs3270.c | 1
-rw-r--r--  drivers/s390/char/keyboard.c | 21
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.c | 2
-rw-r--r--  drivers/s390/char/vmcp.c | 38
-rw-r--r--  drivers/s390/char/zcore.c | 4
-rw-r--r--  drivers/s390/cio/chp.c | 5
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 1
-rw-r--r--  drivers/s390/cio/cio.c | 3
-rw-r--r--  drivers/s390/cio/css.c | 8
-rw-r--r--  drivers/s390/cio/qdio.h | 15
-rw-r--r--  drivers/s390/cio/qdio_main.c | 67
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 8
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 2
-rw-r--r--  drivers/s390/net/ctcm_main.c | 6
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 6
-rw-r--r--  drivers/s390/net/lcs.c | 3
-rw-r--r--  drivers/s390/net/qeth_core.h | 32
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 254
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 10
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 151
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 47
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 108
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 244
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 19
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 246
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 11
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 108
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 104
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 23
43 files changed, 1221 insertions, 878 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index acf222f91f5a..0e86247d791e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,9 @@
*/
#define DASD_CHANQ_MAX_SIZE 4
+#define DASD_SLEEPON_START_TAG (void *) 1
+#define DASD_SLEEPON_END_TAG (void *) 2
+
/*
* SECTION: exported variables of dasd.c
*/
@@ -62,6 +65,7 @@ static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
@@ -112,6 +116,7 @@ struct dasd_device *dasd_alloc_device(void)
device->timer.data = (unsigned long) device;
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
+ INIT_WORK(&device->reload_device, do_reload_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
@@ -518,6 +523,26 @@ void dasd_kick_device(struct dasd_device *device)
}
/*
+ * dasd_reload_device will schedule a call do do_reload_device to the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ reload_device);
+ device->discipline->reload(device);
+ dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to dasd_reload_device to the kernel event daemon. */
+ schedule_work(&device->reload_device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
* dasd_restore_device will schedule a call do do_restore_device to the kernel
* event daemon.
*/
@@ -1472,7 +1497,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
*/
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
- wake_up((wait_queue_head_t *) data);
+ spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
+ spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+ wake_up(&generic_waitq);
}
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
@@ -1482,10 +1510,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
device = cqr->startdev;
spin_lock_irq(get_ccwdev_lock(device->cdev));
- rc = ((cqr->status == DASD_CQR_DONE ||
- cqr->status == DASD_CQR_NEED_ERP ||
- cqr->status == DASD_CQR_TERMINATED) &&
- list_empty(&cqr->devlist));
+ rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
@@ -1573,7 +1598,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
wait_event(generic_waitq, !(device->stopped));
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
if (interruptible) {
rc = wait_event_interruptible(
@@ -1652,7 +1677,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
}
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
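
[editor's note] The dasd.c hunks above drop the per-request wait-queue pointer in callback_data and instead tag each request with a sleep-on start/end marker that is flipped under the ccw device lock before a single shared wait queue is woken. A minimal userspace sketch of that pattern, using pthreads instead of the kernel wait-queue API (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

#define SLEEPON_START_TAG ((void *) 1)
#define SLEEPON_END_TAG   ((void *) 2)

struct request {
	void *callback_data;		/* tagged instead of holding a waitq */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t generic_waitq = PTHREAD_COND_INITIALIZER;

/* completion side: mark the request finished under the lock, then wake */
static void wakeup_cb(struct request *req)
{
	pthread_mutex_lock(&lock);
	req->callback_data = SLEEPON_END_TAG;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&generic_waitq);
}

/* waiter side: sleep until the end tag becomes visible */
static void wait_for_request(struct request *req)
{
	pthread_mutex_lock(&lock);
	while (req->callback_data != SLEEPON_END_TAG)
		pthread_cond_wait(&generic_waitq, &lock);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	wakeup_cb(arg);			/* pretend the I/O just completed */
	return NULL;
}

int main(void)
{
	struct request req = { .callback_data = SLEEPON_START_TAG };
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &req);
	wait_for_request(&req);
	pthread_join(tid, NULL);
	printf("request completed\n");
	return 0;
}
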
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6632649dd6aa..85bfd8794856 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1418,9 +1418,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *cqr = erp->refers;
+ char *sense;
if (cqr->block &&
(cqr->block->base != cqr->startdev)) {
+
+ sense = dasd_get_sense(&erp->refers->irb);
+ /*
+ * dynamic pav may have changed base alias mapping
+ */
+ if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+ && (sense[0] == 0x10) && (sense[7] == 0x0F)
+ && (sense[8] == 0x67)) {
+ /*
+ * remove device from alias handling to prevent new
+ * requests from being scheduled on the
+ * wrong alias device
+ */
+ dasd_alias_remove_device(cqr->startdev);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(cqr->startdev);
+ }
+
if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
"ERP on alias device for request %p,"
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 8c4814258e93..4155805dcdff 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -190,20 +190,21 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
int is_lcu_known;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+
+ device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
is_lcu_known = 1;
- server = _find_server(uid);
+ server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
- newserver = _allocate_server(uid);
+ newserver = _allocate_server(&uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
@@ -214,14 +215,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
}
}
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
- newlcu = _allocate_lcu(uid);
+ newlcu = _allocate_lcu(&uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
@@ -256,20 +257,20 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+ device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (server)
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
- uid->ssid, uid->real_unit_addr);
+ uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
@@ -282,20 +283,20 @@ void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+ device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (server)
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
- uid->ssid, uid->real_unit_addr);
+ uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
@@ -314,9 +315,11 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
@@ -353,7 +356,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
- server = _find_server(&private->uid);
+ server = _find_server(&uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
@@ -366,19 +369,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
+
static int _add_device_to_lcu(struct alias_lcu *lcu,
- struct dasd_device *device)
+ struct dasd_device *device,
+ struct dasd_device *pos)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
+ unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
- uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
- uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
- dasd_set_uid(device->cdev, &private->uid);
+
+ /* only lock if not already locked */
+ if (device != pos)
+ spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
+ CDEV_NESTED_SECOND);
+ private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+ private->uid.base_unit_addr =
+ lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+ uid = private->uid;
+
+ if (device != pos)
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
@@ -386,25 +400,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
return 0;
}
- group = _find_group(lcu, uid);
+ group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
- memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
- memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
- group->uid.ssid = uid->ssid;
- if (uid->type == UA_BASE_DEVICE)
- group->uid.base_unit_addr = uid->real_unit_addr;
+ memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+ memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+ group->uid.ssid = uid.ssid;
+ if (uid.type == UA_BASE_DEVICE)
+ group->uid.base_unit_addr = uid.real_unit_addr;
else
- group->uid.base_unit_addr = uid->base_unit_addr;
- memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
+ group->uid.base_unit_addr = uid.base_unit_addr;
+ memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
- if (uid->type == UA_BASE_DEVICE)
+ if (uid.type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
@@ -525,7 +539,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
if (rc)
return rc;
- spin_lock_irqsave(&lcu->lock, flags);
+ /* need to take cdev lock before lcu lock */
+ spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
+ CDEV_NESTED_FIRST);
+ spin_lock(&lcu->lock);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -542,9 +559,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
- _add_device_to_lcu(lcu, device);
+ _add_device_to_lcu(lcu, device, refdev);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock(&lcu->lock);
+ spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
return 0;
}
@@ -628,9 +646,12 @@ int dasd_alias_add_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
- spin_lock_irqsave(&lcu->lock, flags);
+
+ /* need to take cdev lock before lcu lock */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ spin_lock(&lcu->lock);
if (!(lcu->flags & UPDATE_PENDING)) {
- rc = _add_device_to_lcu(lcu, device);
+ rc = _add_device_to_lcu(lcu, device, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
@@ -638,10 +659,19 @@ int dasd_alias_add_device(struct dasd_device *device)
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock(&lcu->lock);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return rc;
}
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ private = (struct dasd_eckd_private *) device->private;
+ private->lcu->flags |= UPDATE_PENDING;
+ return dasd_alias_add_device(device);
+}
+
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
@@ -740,19 +770,30 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
+ unsigned long flags;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
- if (private->uid.type != UA_BASE_DEVICE)
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (private->uid.type != UA_BASE_DEVICE) {
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
continue;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
- if (private->uid.type != UA_BASE_DEVICE)
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (private->uid.type != UA_BASE_DEVICE) {
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
continue;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
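
[editor's note] The dasd_alias.c hunks establish a fixed lock order (ccw device lock before the lcu lock) and use spin_lock_irqsave_nested() with the new CDEV_NESTED_* classes where two locks of the same class may be held. A hedged sketch of the fixed-order idea with two pthread mutexes (purely illustrative names, not the driver's locking):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cdev_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t lcu_lock  = PTHREAD_MUTEX_INITIALIZER;	/* inner */

/*
 * Every path that needs both locks takes them in the same order:
 * device lock first, lcu lock second.  Taking them in the opposite
 * order on another path is what opens up an ABBA deadlock.
 */
static void update_under_both_locks(void (*update)(void))
{
	pthread_mutex_lock(&cdev_lock);
	pthread_mutex_lock(&lcu_lock);
	update();
	pthread_mutex_unlock(&lcu_lock);
	pthread_mutex_unlock(&cdev_lock);
}

static void touch_uid(void)
{
	puts("uid updated under both locks");
}

int main(void)
{
	update_under_both_locks(touch_uid);
	return 0;
}
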
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index eff9c812c5c2..34d51dd4c539 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,7 +49,6 @@ struct dasd_devmap {
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
- struct dasd_uid uid;
};
/*
@@ -936,42 +935,46 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
-static ssize_t
-dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_alias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
- int alias;
+ struct dasd_device *device;
+ struct dasd_uid uid;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
- spin_unlock(&dasd_devmap_lock);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
return sprintf(buf, "0\n");
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ if (uid.type == UA_BASE_PAV_ALIAS ||
+ uid.type == UA_HYPER_PAV_ALIAS)
+ return sprintf(buf, "1\n");
}
- if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
- devmap->uid.type == UA_HYPER_PAV_ALIAS)
- alias = 1;
- else
- alias = 0;
- spin_unlock(&dasd_devmap_lock);
- return sprintf(buf, alias ? "1\n" : "0\n");
+ dasd_put_device(device);
+
+ return sprintf(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
-static ssize_t
-dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ struct dasd_uid uid;
char *vendor;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
- vendor = devmap->uid.vendor;
- else
- vendor = "";
- spin_unlock(&dasd_devmap_lock);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ vendor = "";
+ if (IS_ERR(device))
+ return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid))
+ vendor = uid.vendor;
+
+ dasd_put_device(device);
return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
}
@@ -985,48 +988,51 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ struct dasd_uid uid;
char uid_string[UID_STRLEN];
char ua_string[3];
- struct dasd_uid *uid;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
- spin_unlock(&dasd_devmap_lock);
- return sprintf(buf, "\n");
- }
- uid = &devmap->uid;
- switch (uid->type) {
- case UA_BASE_DEVICE:
- sprintf(ua_string, "%02x", uid->real_unit_addr);
- break;
- case UA_BASE_PAV_ALIAS:
- sprintf(ua_string, "%02x", uid->base_unit_addr);
- break;
- case UA_HYPER_PAV_ALIAS:
- sprintf(ua_string, "xx");
- break;
- default:
- /* should not happen, treat like base device */
- sprintf(ua_string, "%02x", uid->real_unit_addr);
- break;
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ uid_string[0] = 0;
+ if (IS_ERR(device))
+ return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ switch (uid.type) {
+ case UA_BASE_DEVICE:
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.real_unit_addr);
+ break;
+ case UA_BASE_PAV_ALIAS:
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.base_unit_addr);
+ break;
+ case UA_HYPER_PAV_ALIAS:
+ snprintf(ua_string, sizeof(ua_string), "xx");
+ break;
+ default:
+ /* should not happen, treat like base device */
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.real_unit_addr);
+ break;
+ }
+
+ if (strlen(uid.vduit) > 0)
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit);
+ else
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string);
}
- if (strlen(uid->vduit) > 0)
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s.%s",
- uid->vendor, uid->serial,
- uid->ssid, ua_string,
- uid->vduit);
- else
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s",
- uid->vendor, uid->serial,
- uid->ssid, ua_string);
- spin_unlock(&dasd_devmap_lock);
+ dasd_put_device(device);
+
return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
}
-
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
/*
@@ -1094,50 +1100,6 @@ static struct attribute_group dasd_attr_group = {
};
/*
- * Return copy of the device unique identifier.
- */
-int
-dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
- struct dasd_devmap *devmap;
-
- devmap = dasd_find_busid(dev_name(&cdev->dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- spin_lock(&dasd_devmap_lock);
- *uid = devmap->uid;
- spin_unlock(&dasd_devmap_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_get_uid);
-
-/*
- * Register the given device unique identifier into devmap struct.
- * In addition check if the related storage server subsystem ID is already
- * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
- * create new entry.
- * Return 0 if server was already in serverlist,
- * 1 if the server was added successful
- * <0 in case of error.
- */
-int
-dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
- struct dasd_devmap *devmap;
-
- devmap = dasd_find_busid(dev_name(&cdev->dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
-
- spin_lock(&dasd_devmap_lock);
- devmap->uid = *uid;
- spin_unlock(&dasd_devmap_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_set_uid);
-
-/*
* Return value of the specified feature.
*/
int
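
[editor's note] With the UID removed from the devmap, the sysfs show routines above stop dereferencing shared state under dasd_devmap_lock and instead ask the discipline for a by-value copy of the UID. A minimal sketch of a copy-out-under-lock accessor (illustrative types, not the driver's):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct uid {
	char vendor[4];
	char serial[15];
};

struct device_private {
	pthread_mutex_t lock;
	struct uid uid;			/* may be rewritten on reload */
};

/* return a consistent snapshot; the caller never sees a half-updated UID */
static int get_uid(struct device_private *priv, struct uid *out)
{
	pthread_mutex_lock(&priv->lock);
	*out = priv->uid;
	pthread_mutex_unlock(&priv->lock);
	return 0;
}

int main(void)
{
	struct device_private priv = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct uid snap;

	strcpy(priv.uid.vendor, "IBM");
	strcpy(priv.uid.serial, "0123456789");
	get_uid(&priv, &snap);
	printf("%s.%s\n", snap.vendor, snap.serial);
	return 0;
}
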
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0cb233116855..5b1cd8d6e971 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -692,18 +692,20 @@ dasd_eckd_cdl_reclen(int recid)
/*
* Generate device unique id that specifies the physical device.
*/
-static int dasd_eckd_generate_uid(struct dasd_device *device,
- struct dasd_uid *uid)
+static int dasd_eckd_generate_uid(struct dasd_device *device)
{
struct dasd_eckd_private *private;
+ struct dasd_uid *uid;
int count;
+ unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
if (!private)
return -ENODEV;
if (!private->ned || !private->gneq)
return -ENODEV;
-
+ uid = &private->uid;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
@@ -726,9 +728,25 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
private->vdsneq->uit[count]);
}
}
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ if (device->private) {
+ private = (struct dasd_eckd_private *)device->private;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ *uid = private->uid;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return 0;
+ }
+ return -EINVAL;
+}
+
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
void *rcd_buffer,
struct ciw *ciw, __u8 lpm)
@@ -1088,6 +1106,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_block *block;
+ struct dasd_uid temp_uid;
int is_known, rc;
int readonly;
@@ -1124,13 +1143,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &private->uid);
+ /* Generate device unique id */
+ rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err1;
- dasd_set_uid(device->cdev, &private->uid);
- if (private->uid.type == UA_BASE_DEVICE) {
+ dasd_eckd_get_uid(device, &temp_uid);
+ if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
@@ -1451,6 +1470,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
+ cancel_work_sync(&device->reload_device);
return dasd_alias_remove_device(device);
};
@@ -1709,10 +1729,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
{
char mask;
char *sense = NULL;
+ struct dasd_eckd_private *private;
+ private = (struct dasd_eckd_private *) device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+ /* for alias only and not in offline processing*/
+ if (!device->block && private->lcu &&
+ !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /*
+ * the state change could be caused by an alias
+ * reassignment remove device from alias handling
+ * to prevent new requests from being scheduled on
+ * the wrong alias device
+ */
+ dasd_alias_remove_device(device);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(device);
+ }
+
dasd_generic_handle_state_change(device);
return;
}
@@ -3259,7 +3296,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
dasd_eckd_dump_sense_ccw(device, req, irb);
}
-int dasd_eckd_pm_freeze(struct dasd_device *device)
+static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
/*
* the device should be disconnected from our LCU structure
@@ -3272,7 +3309,7 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
return 0;
}
-int dasd_eckd_restore_device(struct dasd_device *device)
+static int dasd_eckd_restore_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_eckd_characteristics temp_rdc_data;
@@ -3287,15 +3324,16 @@ int dasd_eckd_restore_device(struct dasd_device *device)
if (rc)
goto out_err;
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &private->uid);
- dasd_get_uid(device->cdev, &temp_uid);
+ dasd_eckd_get_uid(device, &temp_uid);
+ /* Generate device unique id */
+ rc = dasd_eckd_generate_uid(device);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
dev_err(&device->cdev->dev, "The UID of the DASD has "
"changed\n");
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (rc)
goto out_err;
- dasd_set_uid(device->cdev, &private->uid);
/* register lcu with alias handling, enable PAV if this is a new lcu */
is_known = dasd_alias_make_device_known_to_lcu(device);
@@ -3336,6 +3374,56 @@ out_err:
return -1;
}
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ int rc, old_base;
+ char print_uid[60];
+ struct dasd_uid uid;
+ unsigned long flags;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ old_base = private->uid.base_unit_addr;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
+
+ rc = dasd_eckd_generate_uid(device);
+ if (rc)
+ goto out_err;
+ /*
+ * update unit address configuration and
+ * add device to alias management
+ */
+ dasd_alias_update_add_device(device);
+
+ dasd_eckd_get_uid(device, &uid);
+
+ if (old_base != uid.base_unit_addr) {
+ if (strlen(uid.vduit) > 0)
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
+ uid.ssid, uid.base_unit_addr, uid.vduit);
+ else
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x", uid.vendor, uid.serial,
+ uid.ssid, uid.base_unit_addr);
+
+ dev_info(&device->cdev->dev,
+ "An Alias device was reassigned to a new base device "
+ "with UID: %s\n", print_uid);
+ }
+ return 0;
+
+out_err:
+ return -1;
+}
+
static struct ccw_driver dasd_eckd_driver = {
.name = "dasd-eckd",
.owner = THIS_MODULE,
@@ -3389,6 +3477,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
.ioctl = dasd_eckd_ioctl,
.freeze = dasd_eckd_pm_freeze,
.restore = dasd_eckd_restore_device,
+ .reload = dasd_eckd_reload_device,
+ .get_uid = dasd_eckd_get_uid,
};
static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 864d53c04201..dd6385a5af14 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -426,7 +426,6 @@ struct alias_pav_group {
struct dasd_device *next;
};
-
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
@@ -463,4 +462,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
+int dasd_alias_update_add_device(struct dasd_device *);
#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a91d4a97d4f2..32fac186ba3f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -81,6 +81,10 @@ struct dasd_block;
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
@@ -229,6 +233,24 @@ struct dasd_ccw_req {
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED 0x00
+#define UA_BASE_DEVICE 0x01
+#define UA_BASE_PAV_ALIAS 0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+ __u8 type;
+ char vendor[4];
+ char serial[15];
+ __u16 ssid;
+ __u8 real_unit_addr;
+ __u8 base_unit_addr;
+ char vduit[33];
+};
+
+/*
* the struct dasd_discipline is
* sth like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
@@ -312,28 +334,15 @@ struct dasd_discipline {
/* suspend/resume functions */
int (*freeze) (struct dasd_device *);
int (*restore) (struct dasd_device *);
-};
-extern struct dasd_discipline *dasd_diag_discipline_pointer;
-
-/*
- * Unique identifier for dasd device.
- */
-#define UA_NOT_CONFIGURED 0x00
-#define UA_BASE_DEVICE 0x01
-#define UA_BASE_PAV_ALIAS 0x02
-#define UA_HYPER_PAV_ALIAS 0x03
+ /* reload device after state change */
+ int (*reload) (struct dasd_device *);
-struct dasd_uid {
- __u8 type;
- char vendor[4];
- char serial[15];
- __u16 ssid;
- __u8 real_unit_addr;
- __u8 base_unit_addr;
- char vduit[33];
+ int (*get_uid) (struct dasd_device *, struct dasd_uid *);
};
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
@@ -386,6 +395,7 @@ struct dasd_device {
struct tasklet_struct tasklet;
struct work_struct kick_work;
struct work_struct restore_device;
+ struct work_struct reload_device;
struct timer_list timer;
debug_info_t *debug_area;
@@ -582,6 +592,7 @@ void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -629,8 +640,6 @@ void dasd_devmap_exit(void);
struct dasd_device *dasd_create_device(struct ccw_device *);
void dasd_delete_device(struct dasd_device *);
-int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
-int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
int dasd_get_feature(struct ccw_device *, int);
int dasd_set_feature(struct ccw_device *, int, int);
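
[editor's note] dasd_int.h extends the discipline operations table with reload and get_uid hooks; the header's own comment already describes the discipline as something like a table of virtual functions. A small sketch of that pattern (hypothetical ops and UID string, not the real discipline layout):

#include <stdio.h>

struct device;					/* opaque here */

struct discipline {
	const char *name;
	int (*reload)(struct device *);		/* re-read config after a state change */
	int (*get_uid)(struct device *, char *buf, int len);
};

static int eckd_reload(struct device *dev)
{
	(void)dev;
	return 0;
}

static int eckd_get_uid(struct device *dev, char *buf, int len)
{
	(void)dev;
	return snprintf(buf, len, "IBM.000000000001.0001.00") >= len ? -1 : 0;
}

static const struct discipline eckd_discipline = {
	.name    = "ECKD",
	.reload  = eckd_reload,
	.get_uid = eckd_get_uid,
};

int main(void)
{
	char buf[64];

	if (!eckd_discipline.get_uid(NULL, buf, sizeof(buf)))
		printf("%s uid: %s\n", eckd_discipline.name, buf);
	return eckd_discipline.reload(NULL);
}
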
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 4e34d3686c23..40834f18754c 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -148,13 +148,12 @@ config VMLOGRDR
This driver depends on the IUCV support driver.
config VMCP
- tristate "Support for the z/VM CP interface (VM only)"
+ bool "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM
-
config MONREADER
tristate "API for reading z/VM monitor service records"
depends on IUCV
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0eabcca3c92d..857dfcb7b359 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -484,6 +484,7 @@ fs3270_open(struct inode *inode, struct file *filp)
raw3270_del_view(&fp->view);
goto out;
}
+ nonseekable_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index cb6bffe7141a..18d9a497863b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -49,7 +49,7 @@ static unsigned char ret_diacr[NR_DEAD] = {
struct kbd_data *
kbd_alloc(void) {
struct kbd_data *kbd;
- int i, len;
+ int i;
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
@@ -59,12 +59,11 @@ kbd_alloc(void) {
goto out_kbd;
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
if (key_maps[i]) {
- kbd->key_maps[i] =
- kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
+ kbd->key_maps[i] = kmemdup(key_maps[i],
+ sizeof(u_short) * NR_KEYS,
+ GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
- memcpy(kbd->key_maps[i], key_maps[i],
- sizeof(u_short)*NR_KEYS);
}
}
kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
@@ -72,23 +71,21 @@ kbd_alloc(void) {
goto out_maps;
for (i = 0; i < ARRAY_SIZE(func_table); i++) {
if (func_table[i]) {
- len = strlen(func_table[i]) + 1;
- kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
+ kbd->func_table[i] = kstrdup(func_table[i],
+ GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
- memcpy(kbd->func_table[i], func_table[i], len);
}
}
kbd->fn_handler =
kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
- kbd->accent_table =
- kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL);
+ kbd->accent_table = kmemdup(accent_table,
+ sizeof(struct kbdiacruc) * MAX_DIACR,
+ GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
- memcpy(kbd->accent_table, accent_table,
- sizeof(struct kbdiacruc)*MAX_DIACR);
kbd->accent_table_size = accent_table_size;
return kbd;
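
[editor's note] The keyboard.c hunks replace open-coded kmalloc()+memcpy() and strlen()+kmalloc()+memcpy() pairs with kmemdup() and kstrdup(). Outside the kernel the same simplification looks roughly like this (memdup() is a hypothetical stand-in; strdup() is POSIX):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace stand-in for kmemdup(): allocate and copy in one step */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned short keymap[4] = { 1, 2, 3, 4 };
	unsigned short *map_copy = memdup(keymap, sizeof(keymap));
	char *name_copy = strdup("f1");		/* replaces strlen+malloc+memcpy */

	if (!map_copy || !name_copy)
		return 1;
	printf("%u %s\n", (unsigned)map_copy[3], name_copy);
	free(map_copy);
	free(name_copy);
	return 0;
}
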
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 62c2647f37f4..4a51e3f09689 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -102,7 +102,7 @@ static struct sclp_req *cpi_prepare_req(void)
/* set system name */
set_data(evb->system_name, system_name);
- /* set sytem level */
+ /* set system level */
evb->system_level = system_level;
/* set sysplex name */
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 5bb59d36a6d4..04e532eec032 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,24 +1,20 @@
/*
- * Copyright IBM Corp. 2004,2007
+ * Copyright IBM Corp. 2004,2010
* Interface implementation for communication with the z/VM control program
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* z/VMs CP offers the possibility to issue commands via the diagnose code 8
* this driver implements a character device that issues these commands and
* returns the answer of CP.
-
+ *
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
-#define KMSG_COMPONENT "vmcp"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
@@ -26,10 +22,6 @@
#include <asm/uaccess.h>
#include "vmcp.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
-MODULE_DESCRIPTION("z/VM CP interface");
-
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
@@ -197,11 +189,8 @@ static int __init vmcp_init(void)
{
int ret;
- if (!MACHINE_IS_VM) {
- pr_warning("The z/VM CP interface device driver cannot be "
- "loaded without z/VM\n");
- return -ENODEV;
- }
+ if (!MACHINE_IS_VM)
+ return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
@@ -214,19 +203,8 @@ static int __init vmcp_init(void)
}
ret = misc_register(&vmcp_dev);
- if (ret) {
+ if (ret)
debug_unregister(vmcp_debug);
- return ret;
- }
-
- return 0;
-}
-
-static void __exit vmcp_exit(void)
-{
- misc_deregister(&vmcp_dev);
- debug_unregister(vmcp_debug);
+ return ret;
}
-
-module_init(vmcp_init);
-module_exit(vmcp_exit);
+device_initcall(vmcp_init);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7217966f7d31..f5ea3384a4b9 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -445,7 +445,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
}
kfree(chunk_array);
filp->private_data = buf;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
@@ -473,7 +473,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
- return 0;
+ return nonseekable_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1d16189f2f2d..6c9fa15aac7b 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -135,7 +135,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -182,7 +183,7 @@ static void chp_measurement_copy_block(struct cmg_entry *buf,
} while (reference_buf.values[0] != buf->values[0]);
}
-static ssize_t chp_measurement_read(struct kobject *kobj,
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3b6f4adc5094..a83877c664a6 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -803,6 +803,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
+ .open = nonseekable_open,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
};
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5feea1a371e1..f4e6cf3aceb8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -616,7 +616,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- s390_idle_check();
+ s390_idle_check(regs, S390_lowcore.int_clock,
+ S390_lowcore.async_enter_timer);
irq_enter();
__get_cpu_var(s390_idle).nohz_delay = 1;
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 511649115bd7..ac94ac751459 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -648,6 +648,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
+ struct cpuid cpu_id;
+
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -658,8 +660,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
- css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -1062,6 +1065,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
}
static const struct file_operations cio_settle_proc_fops = {
+ .open = nonseekable_open,
.write = cio_settle_write,
};
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 48aa0647432b..f0037eefd44e 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -13,8 +13,8 @@
#include <asm/debug.h>
#include "chsc.h"
-#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
-#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
+#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
/*
* if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
@@ -296,10 +296,8 @@ struct qdio_q {
struct qdio_irq *irq_ptr;
struct sl *sl;
/*
- * Warning: Leave this member at the end so it won't be cleared in
- * qdio_fill_qs. A page is allocated under this pointer and used for
- * slib and sl. slib is 2048 bytes big and sl points to offset
- * PAGE_SIZE / 2.
+ * A page is allocated under this pointer and used for slib and sl.
+ * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
*/
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -372,11 +370,6 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-static inline unsigned long long get_usecs(void)
-{
- return monotonic_clock() >> 12;
-}
-
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
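
[editor's note] qdio.h now expresses the busy-bit and input thresholds directly in TOD clock units: bit 51 of the s390 TOD clock ticks once per microsecond, so N microseconds correspond to N << 12 in the raw clock value, and the removed get_usecs() helper (monotonic_clock() >> 12) becomes unnecessary once all comparisons stay in TOD units. A one-line check of the conversion (plain C, no s390 specifics assumed):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long usecs = 100;			/* QDIO_BUSY_BIT_PATIENCE */
	unsigned long long tod   = usecs << 12;		/* microseconds -> TOD units */

	assert(tod == 409600ULL);
	assert((tod >> 12) == usecs);			/* and back */
	printf("100 us == %llu TOD units\n", tod);
	return 0;
}
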
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 88be7b9ea6e1..00520f9a7a8e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -336,10 +336,10 @@ again:
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
if (!start_time) {
- start_time = get_usecs();
+ start_time = get_clock();
goto again;
}
- if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
return cc;
@@ -536,7 +536,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_usecs();
+ q->u.in.timestamp = get_clock();
return 1;
} else
return 0;
@@ -567,7 +567,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -606,7 +606,7 @@ static void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
-again:
+
if (!qdio_inbound_q_moved(q))
return;
@@ -615,7 +615,10 @@ again:
if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
- goto again;
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
}
qdio_stop_polling(q);
@@ -625,7 +628,8 @@ again:
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
- goto again;
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
}
}
@@ -955,6 +959,9 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
+ if (irq_ptr->perf_stat_enabled)
+ irq_ptr->perf_stat.qdio_int++;
+
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
@@ -1016,30 +1023,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-/**
- * qdio_cleanup - shutdown queues and free data structures
- * @cdev: associated ccw device
- * @how: use halt or clear to shutdown
- *
- * This function calls qdio_shutdown() for @cdev with method @how.
- * and qdio_free(). The qdio_free() return value is ignored since
- * !irq_ptr is already checked.
- */
-int qdio_cleanup(struct ccw_device *cdev, int how)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- int rc;
-
- if (!irq_ptr)
- return -ENODEV;
-
- rc = qdio_shutdown(cdev, how);
-
- qdio_free(cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_cleanup);
-
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1157,28 +1140,6 @@ int qdio_free(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(qdio_free);
/**
- * qdio_initialize - allocate and establish queues for a qdio subchannel
- * @init_data: initialization data
- *
- * This function first allocates queues via qdio_allocate() and on success
- * establishes them via qdio_establish().
- */
-int qdio_initialize(struct qdio_initialize *init_data)
-{
- int rc;
-
- rc = qdio_allocate(init_data);
- if (rc)
- return rc;
-
- rc = qdio_establish(init_data);
- if (rc)
- qdio_free(init_data->cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_initialize);
-
-/**
* qdio_allocate - allocate qdio queues and associated data
* @init_data: initialization data
*/
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 7f4a75465140..6326b67c45d2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -106,10 +106,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- /* must be cleared by every qdio_establish */
- memset(q, 0, ((char *)&q->slib) - ((char *)q));
- memset(q->slib, 0, PAGE_SIZE);
+ struct slib *slib = q->slib;
+ /* queue must be cleared for qdio_establish */
+ memset(q, 0, sizeof(*q));
+ memset(slib, 0, PAGE_SIZE);
+ q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ce5f8910ff83..8daf1b99f153 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -95,7 +95,7 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
for_each_input_queue(irq_ptr, q, i)
list_add_rcu(&q->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1);
+ xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -173,7 +173,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
/* prevent racing */
if (*tiqdio_alsi)
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
+ xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
}
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 304caf549973..41e0aaefafd5 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -302,7 +302,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
static int zcrypt_open(struct inode *inode, struct file *filp)
{
atomic_inc(&zcrypt_open_count);
- return 0;
+ return nonseekable_open(inode, filp);
}
/**
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e35713dd0504..4ecafbf91211 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1364,8 +1364,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->protocol = priv->protocol;
if (IS_MPC(priv)) {
- ch->discontact_th = (struct th_header *)
- kzalloc(TH_HEADER_LENGTH, gfp_type());
+ ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
if (ch->discontact_th == NULL)
goto nomem_return;
@@ -1379,8 +1378,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
} else
ccw_num = 8;
- ch->ccw = (struct ccw1 *)
- kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (ch->ccw == NULL)
goto nomem_return;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 5978b390153f..87c24d2936d6 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -669,8 +669,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
goto done;
}
- header = (struct th_sweep *)
- kmalloc(sizeof(struct th_sweep), gfp_type());
+ header = kmalloc(sizeof(struct th_sweep), gfp_type());
if (!header) {
dev_kfree_skb_any(sweep_skb);
@@ -1191,8 +1190,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb_pull(pskb, new_len); /* point to next PDU */
}
} else {
- mpcginfo = (struct mpcg_info *)
- kmalloc(sizeof(struct mpcg_info), gfp_type());
+ mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
if (mpcginfo == NULL)
goto done;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 9b19ea13b4d8..0f19d540b655 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1238,8 +1238,7 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
ipm = lcs_check_addr_entry(card, im4, buf);
if (ipm != NULL)
continue; /* Address already in list. */
- ipm = (struct lcs_ipm_list *)
- kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+ ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
if (ipm == NULL) {
pr_info("Not enough memory to add"
" new multicast entry!\n");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fcd005aad989..7a44c38aaf65 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -179,25 +179,23 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
((prot == QETH_PROT_IPV6) ? \
qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
-#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
+#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
#define QETH_MODELLIST_ARRAY \
- {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
- QETH_MAX_QUEUES, 0}, \
- {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
- QETH_MAX_QUEUES, 0x103}, \
- {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
- QETH_MAX_QUEUES, 0}, \
- {0, 0, 0, 0, 0, 0, 0, 0, 0} }
+ {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
+ {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
+ {0, 0, 0, 0, 0, 0} }
+#define QETH_CU_TYPE_IND 0
+#define QETH_CU_MODEL_IND 1
+#define QETH_DEV_TYPE_IND 2
+#define QETH_DEV_MODEL_IND 3
+#define QETH_QUEUE_NO_IND 4
+#define QETH_MULTICAST_IND 5
#define QETH_REAL_CARD 1
#define QETH_VLAN_CARD 2
@@ -351,7 +349,7 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
-#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
+#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
@@ -630,6 +628,7 @@ struct qeth_card_info {
int unique_id;
struct qeth_card_blkt blkt;
__u32 csum_mask;
+ __u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
};
@@ -739,6 +738,7 @@ struct qeth_card {
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
+ struct mutex conf_mutex;
};
struct qeth_card_list_struct {
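
[editor's note] qeth_core.h shrinks each QETH_MODELLIST_ARRAY row to six entries and adds the QETH_*_IND constants so qeth_determine_card_type() indexes rows by name rather than by magic offsets. A tiny sketch of that table-with-named-columns idea (hypothetical values, not the real model list):

#include <stdio.h>

#define DEV_TYPE_IND   0
#define DEV_MODEL_IND  1
#define QUEUE_NO_IND   2

static const unsigned int known_devices[][3] = {
	{ 0x1732, 0x01, 4 },
	{ 0x1732, 0x05, 1 },
	{ 0, 0, 0 },			/* terminator */
};

int main(void)
{
	for (int i = 0; known_devices[i][DEV_MODEL_IND]; i++)
		printf("type %#x model %#x: %u queues\n",
		       known_devices[i][DEV_TYPE_IND],
		       known_devices[i][DEV_MODEL_IND],
		       known_devices[i][QUEUE_NO_IND]);
	return 0;
}
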
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3ba738b2e271..13ef46b9d388 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -53,7 +53,7 @@ struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct device *qeth_core_root_dev;
-static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
+static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_send_control_data_cb(struct qeth_channel *,
@@ -111,21 +111,29 @@ static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return " Guest LAN QDIO";
case QETH_CARD_TYPE_IQD:
return " Guest LAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return " Guest LAN QDIO - OSM";
+ case QETH_CARD_TYPE_OSX:
+ return " Guest LAN QDIO - OSX";
default:
return " unknown";
}
} else {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
case QETH_CARD_TYPE_OSN:
return " OSN QDIO";
+ case QETH_CARD_TYPE_OSM:
+ return " OSM QDIO";
+ case QETH_CARD_TYPE_OSX:
+ return " OSX QDIO";
default:
return " unknown";
}
@@ -138,16 +146,20 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return "GuestLAN QDIO";
case QETH_CARD_TYPE_IQD:
return "GuestLAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return "GuestLAN OSM";
+ case QETH_CARD_TYPE_OSX:
+ return "GuestLAN OSX";
default:
return "unknown";
}
} else {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
return "OSD_100";
@@ -172,6 +184,10 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "HiperSockets";
case QETH_CARD_TYPE_OSN:
return "OSN";
+ case QETH_CARD_TYPE_OSM:
+ return "OSM_1000";
+ case QETH_CARD_TYPE_OSX:
+ return "OSX_10GIG";
default:
return "unknown";
}
@@ -419,7 +435,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
-static int qeth_check_idx_response(unsigned char *buffer)
+static int qeth_check_idx_response(struct qeth_card *card,
+ unsigned char *buffer)
{
if (!buffer)
return 0;
@@ -434,6 +451,12 @@ static int qeth_check_idx_response(unsigned char *buffer)
QETH_DBF_TEXT(TRACE, 2, "ckidxres");
QETH_DBF_TEXT(TRACE, 2, " idxterm");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ if (buffer[4] == 0xf6) {
+ dev_err(&card->gdev->dev,
+ "The qeth device is not configured "
+ "for the OSI layer required by z/VM\n");
+ return -EPERM;
+ }
return -EIO;
}
return 0;
@@ -528,18 +551,19 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
struct qeth_ipa_cmd *cmd;
unsigned long flags;
int keep_reply;
+ int rc = 0;
QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
card = CARD_FROM_CDEV(channel->ccwdev);
- if (qeth_check_idx_response(iob->data)) {
+ rc = qeth_check_idx_response(card, iob->data);
+ switch (rc) {
+ case 0:
+ break;
+ case -EIO:
qeth_clear_ipacmd_list(card);
- if (((iob->data[2] & 0xc0) == 0xc0) && iob->data[4] == 0xf6)
- dev_err(&card->gdev->dev,
- "The qeth device is not configured "
- "for the OSI layer required by z/VM\n");
- else
- qeth_schedule_recovery(card);
+ qeth_schedule_recovery(card);
+ default:
goto out;
}
@@ -606,7 +630,7 @@ static int qeth_setup_channel(struct qeth_channel *channel)
QETH_DBF_TEXT(SETUP, 2, "setupch");
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
- channel->iob[cnt].data = (char *)
+ channel->iob[cnt].data =
kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
@@ -719,7 +743,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ",
+ QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
dev_name(&cdev->dev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
@@ -998,9 +1022,8 @@ static void qeth_clean_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
}
-static int qeth_is_1920_device(struct qeth_card *card)
+static void qeth_get_channel_path_desc(struct qeth_card *card)
{
- int single_queue = 0;
struct ccw_device *ccwdev;
struct channelPath_dsc {
u8 flags;
@@ -1013,17 +1036,25 @@ static int qeth_is_1920_device(struct qeth_card *card)
u8 chpp;
} *chp_dsc;
- QETH_DBF_TEXT(SETUP, 2, "chk_1920");
+ QETH_DBF_TEXT(SETUP, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
if (chp_dsc != NULL) {
/* CHPP field bit 6 == 1 -> single queue */
- single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
+ if ((chp_dsc->chpp & 0x02) == 0x02)
+ card->qdio.no_out_queues = 1;
+ card->info.func_level = 0x4100 + chp_dsc->desc;
kfree(chp_dsc);
}
- QETH_DBF_TEXT_(SETUP, 2, "rc:%x", single_queue);
- return single_queue;
+ if (card->qdio.no_out_queues == 1) {
+ card->qdio.default_out_queue = 0;
+ dev_info(&card->gdev->dev,
+ "Priority Queueing not supported\n");
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
+ QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ return;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1100,6 +1131,7 @@ static int qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
+ mutex_init(&card->conf_mutex);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
@@ -1170,18 +1202,17 @@ static int qeth_determine_card_type(struct qeth_card *card)
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- while (known_devices[i][4]) {
- if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
- (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
- card->info.type = known_devices[i][4];
- card->qdio.no_out_queues = known_devices[i][8];
- card->info.is_multicast_different = known_devices[i][9];
- if (qeth_is_1920_device(card)) {
- dev_info(&card->gdev->dev,
- "Priority Queueing not supported\n");
- card->qdio.no_out_queues = 1;
- card->qdio.default_out_queue = 0;
- }
+ while (known_devices[i][QETH_DEV_MODEL_IND]) {
+ if ((CARD_RDEV(card)->id.dev_type ==
+ known_devices[i][QETH_DEV_TYPE_IND]) &&
+ (CARD_RDEV(card)->id.dev_model ==
+ known_devices[i][QETH_DEV_MODEL_IND])) {
+ card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
+ card->qdio.no_out_queues =
+ known_devices[i][QETH_QUEUE_NO_IND];
+ card->info.is_multicast_different =
+ known_devices[i][QETH_MULTICAST_IND];
+ qeth_get_channel_path_desc(card);
return 0;
}
i++;
@@ -1292,13 +1323,14 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
if (card->info.type == QETH_CARD_TYPE_IQD)
- rc = qdio_cleanup(CARD_DDEV(card),
+ rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
- rc = qdio_cleanup(CARD_DDEV(card),
+ rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
@@ -1398,22 +1430,20 @@ static void qeth_init_tokens(struct qeth_card *card)
static void qeth_init_func_level(struct qeth_card *card)
{
- if (card->ipato.enabled) {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
- else
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
- } else {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- /*FIXME:why do we have same values for dis and ena for
- osae??? */
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_IQD:
+ if (card->ipato.enabled)
card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
else
card->info.func_level =
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
+ QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ break;
+ case QETH_CARD_TYPE_OSD:
+ card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
+ break;
+ default:
+ break;
}
}
@@ -1560,7 +1590,7 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
dev_err(&card->write.ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
@@ -1596,27 +1626,35 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
}
card = CARD_FROM_CDEV(channel->ccwdev);
- if (qeth_check_idx_response(iob->data))
+ if (qeth_check_idx_response(card, iob->data))
goto out;
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+ case QETH_IDX_ACT_ERR_EXCL:
dev_err(&card->write.ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
- else
+ break;
+ case QETH_IDX_ACT_ERR_AUTH:
+ dev_err(&card->read.ccwdev->dev,
+ "Setting the device online failed because of "
+ "insufficient LPAR authorization\n");
+ break;
+ default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&card->read.ccwdev->dev));
+ }
goto out;
}
/**
- * temporary fix for microcode bug
- * to revert it,replace OR by AND
- */
+ * * temporary fix for microcode bug
+ * * to revert it,replace OR by AND
+ * */
if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
- (card->info.type == QETH_CARD_TYPE_OSAE))
+ (card->info.type == QETH_CARD_TYPE_OSD))
card->info.portname_required = 1;
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
@@ -1825,7 +1863,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
return 1500;
case QETH_CARD_TYPE_IQD:
return card->info.max_mtu;
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_LANE_TR:
@@ -1833,6 +1871,9 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
default:
return 1492;
}
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
+ return 1492;
default:
return 1500;
}
@@ -1843,8 +1884,10 @@ static inline int qeth_get_max_mtu_for_card(int cardtype)
switch (cardtype) {
case QETH_CARD_TYPE_UNKNOWN:
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSN:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
return 61440;
case QETH_CARD_TYPE_IQD:
return 57344;
@@ -1882,7 +1925,9 @@ static inline int qeth_get_mtu_outof_framesize(int framesize)
static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
return ((mtu >= 576) && (mtu <= 61440));
case QETH_CARD_TYPE_IQD:
return ((mtu >= 576) &&
@@ -1933,6 +1978,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
+ QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -1976,6 +2022,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_cmd_buffer *iob;
+ int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
@@ -1983,8 +2030,15 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_connection_r,
QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
+ if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+ 3)) {
+ QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+ dev_err(&card->gdev->dev, "A connection could not be "
+ "established because of an OLM limit\n");
+ rc = -EMLINK;
+ }
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
- return 0;
+ return rc;
}
static int qeth_ulp_setup(struct qeth_card *card)
@@ -2237,7 +2291,9 @@ static void qeth_print_status_no_portname(struct qeth_card *card)
void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
/* VM will use a non-zero first character to indicate
* a HiperSockets-like reporting of the level;
* OSA sets the first character to zero
@@ -2544,9 +2600,11 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
+ if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+ QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+ }
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
@@ -2936,7 +2994,8 @@ EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
- if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
+ if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX))
return card->qdio.default_out_queue;
switch (card->qdio.no_out_queues) {
case 4:
@@ -3498,13 +3557,14 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 4, "setactlo");
- if (card->info.type == QETH_CARD_TYPE_OSAE &&
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
+ qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation);
if (rc) {
QETH_DBF_MESSAGE(3,
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
+ "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
card->gdev->dev.kobj.name,
rc);
}
@@ -3810,10 +3870,18 @@ static int qeth_qdio_establish(struct qeth_card *card)
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
- rc = qdio_initialize(&init_data);
- if (rc)
+ rc = qdio_allocate(&init_data);
+ if (rc) {
+ atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+ goto out;
+ }
+ rc = qdio_establish(&init_data);
+ if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+ qdio_free(CARD_DDEV(card));
+ }
}
+out:
kfree(out_sbal_ptrs);
kfree(in_sbal_ptrs);
kfree(qib_param_field);
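
The establish hunk above replaces the single qdio_initialize() call with qdio_allocate() followed by qdio_establish(), and frees the QDIO structures again when the second step fails. A condensed sketch of that two-step bring-up, using only the qdio calls that appear in this diff (the example_ name is illustrative):

	/* Sketch: allocate first, establish second, undo on failure. */
	static int example_qdio_bringup(struct qdio_initialize *init_data,
					struct ccw_device *cdev)
	{
		int rc;

		rc = qdio_allocate(init_data);		/* set up queue memory */
		if (rc)
			return rc;

		rc = qdio_establish(init_data);		/* activate the queues */
		if (rc)
			qdio_free(cdev);		/* roll back the allocation */
		return rc;
	}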
@@ -3836,9 +3904,16 @@ static void qeth_core_free_card(struct qeth_card *card)
}
static struct ccw_device_id qeth_ids[] = {
- {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
- {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
- {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
+ .driver_info = QETH_CARD_TYPE_OSD},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
+ .driver_info = QETH_CARD_TYPE_IQD},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
+ .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
+ .driver_info = QETH_CARD_TYPE_OSM},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
+ .driver_info = QETH_CARD_TYPE_OSX},
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -4242,25 +4317,25 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
- if (card->info.type == QETH_CARD_TYPE_OSN) {
+ if (card->info.type == QETH_CARD_TYPE_OSN)
rc = qeth_core_create_osn_attributes(dev);
- if (rc)
- goto err_card;
+ else
+ rc = qeth_core_create_device_attributes(dev);
+ if (rc)
+ goto err_card;
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSN:
+ case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
- if (rc) {
- qeth_core_remove_osn_attributes(dev);
- goto err_card;
- }
+ if (rc)
+ goto err_attr;
rc = card->discipline.ccwgdriver->probe(card->gdev);
- if (rc) {
- qeth_core_free_discipline(card);
- qeth_core_remove_osn_attributes(dev);
- goto err_card;
- }
- } else {
- rc = qeth_core_create_device_attributes(dev);
if (rc)
- goto err_card;
+ goto err_disc;
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSX:
+ default:
+ break;
}
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
@@ -4270,6 +4345,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_determine_capabilities(card);
return 0;
+err_disc:
+ qeth_core_free_discipline(card);
+err_attr:
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ qeth_core_remove_osn_attributes(dev);
+ else
+ qeth_core_remove_device_attributes(dev);
err_card:
qeth_core_free_card(card);
err_dev:
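
The probe rework above funnels every failure through one goto ladder, so each label undoes exactly the steps completed before it (free the discipline, then remove whichever attribute group was created). A generic sketch of that unwind shape; all example_ names are placeholders, not driver functions:

	/* Sketch of the goto-based unwind pattern used in the probe path. */
	static int example_probe(struct device *dev)
	{
		int rc;

		rc = example_create_attributes(dev);	/* placeholder step 1 */
		if (rc)
			goto err_out;
		rc = example_load_discipline(dev);	/* placeholder step 2 */
		if (rc)
			goto err_attr;
		rc = example_start(dev);		/* placeholder step 3 */
		if (rc)
			goto err_disc;
		return 0;

	err_disc:
		example_free_discipline(dev);		/* undo step 2 */
	err_attr:
		example_remove_attributes(dev);		/* undo step 1 */
	err_out:
		return rc;
	}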
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 104a3351e02b..f9ed24de7514 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -48,9 +48,11 @@ extern unsigned char IPA_PDU_HEADER[];
enum qeth_card_types {
QETH_CARD_TYPE_UNKNOWN = 0,
- QETH_CARD_TYPE_OSAE = 10,
- QETH_CARD_TYPE_IQD = 1234,
- QETH_CARD_TYPE_OSN = 11,
+ QETH_CARD_TYPE_OSD = 1,
+ QETH_CARD_TYPE_IQD = 5,
+ QETH_CARD_TYPE_OSN = 6,
+ QETH_CARD_TYPE_OSM = 3,
+ QETH_CARD_TYPE_OSX = 2,
};
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -614,6 +616,8 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
+#define QETH_IDX_ACT_ERR_EXCL 0x19
+#define QETH_IDX_ACT_ERR_AUTH 0x1E
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
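
The new QETH_IDX_ACT_ERR_EXCL and QETH_IDX_ACT_ERR_AUTH constants name the cause-code byte that QETH_IDX_ACT_CAUSE_CODE() pulls out of a negative IDX ACTIVATE reply. A small sketch of decoding it with these macros; the messages paraphrase the dev_err texts used in qeth_core_main.c:

	/* Sketch: translate an IDX ACTIVATE cause code into a message. */
	static const char *example_idx_cause(unsigned char *buffer)
	{
		switch (QETH_IDX_ACT_CAUSE_CODE(buffer)) {	/* byte 0x09 */
		case QETH_IDX_ACT_ERR_EXCL:			/* 0x19 */
			return "adapter used exclusively by another host";
		case QETH_IDX_ACT_ERR_AUTH:			/* 0x1E */
			return "insufficient LPAR authorization";
		default:
			return "negative IDX ACTIVATE reply";
		}
	}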
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 25dfd5abd19b..2eb022ff2610 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -122,23 +122,32 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
unsigned int portno, limit;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO)
- return -EINVAL;
+ if (portno > QETH_MAX_PORTNO) {
+ rc = -EINVAL;
+ goto out;
+ }
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
- if (portno > limit)
- return -EINVAL;
-
+ if (portno > limit) {
+ rc = -EINVAL;
+ goto out;
+ }
card->info.portno = portno;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
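
From here on, every store handler in qeth_core_sys.c is converted to the same shape: take card->conf_mutex, record failures in rc instead of returning early, and finish with rc ? rc : count. A condensed sketch of that pattern; the attribute itself and the parsing step are illustrative only:

	/* Sketch of the locked sysfs store pattern applied throughout this file. */
	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct qeth_card *card = dev_get_drvdata(dev);
		int rc = 0;

		if (!card)
			return -EINVAL;

		mutex_lock(&card->conf_mutex);
		if ((card->state != CARD_STATE_DOWN) &&
		    (card->state != CARD_STATE_RECOVER)) {
			rc = -EPERM;		/* only while the card is offline */
			goto out;
		}
		/* parse buf and update the card configuration here */
	out:
		mutex_unlock(&card->conf_mutex);
		return rc ? rc : count;		/* whole count on success */
	}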
@@ -165,18 +174,23 @@ static ssize_t qeth_dev_portname_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **) &buf, "\n");
- if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
- return -EINVAL;
+ if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) {
+ rc = -EINVAL;
+ goto out;
+ }
card->info.portname[0] = strlen(tmp);
/* pad the port name with blanks for readability */
@@ -184,8 +198,9 @@ static ssize_t qeth_dev_portname_store(struct device *dev,
card->info.portname[i] = ' ';
strcpy(card->info.portname + 1, tmp);
ASCEBC(card->info.portname + 1, 8);
-
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
@@ -215,20 +230,25 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
/* check for single-queue (1920) devices; priority queueing
* cannot be permitted on them
*/
if (card->qdio.no_out_queues == 1) {
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
- return -EPERM;
+ rc = -EPERM;
+ goto out;
}
tmp = strsep((char **) &buf, "\n");
@@ -251,10 +271,11 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
} else if (!strcmp(tmp, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
@@ -277,14 +298,17 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int cnt, old_cnt;
- int rc;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
old_cnt = card->qdio.in_buf_pool.buf_count;
cnt = simple_strtoul(buf, &tmp, 10);
@@ -293,7 +317,9 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
if (old_cnt != cnt) {
rc = qeth_realloc_buffer_pool(card, cnt);
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
@@ -337,25 +363,27 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1)) {
if (i == card->options.performance_stats)
- return count;
+ goto out;
card->options.performance_stats = i;
if (i == 0)
memset(&card->perf_stats, 0,
sizeof(struct qeth_perf_stats));
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
@@ -377,15 +405,17 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i, rc;
+ int i, rc = 0;
enum qeth_discipline_id newdis;
if (!card)
return -EINVAL;
- if (((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)))
- return -EPERM;
+ mutex_lock(&card->conf_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 16);
switch (i) {
@@ -396,12 +426,13 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
- if (card->options.layer2 == newdis) {
- return count;
- } else {
+ if (card->options.layer2 == newdis)
+ goto out;
+ else {
if (card->discipline.ccwgdriver) {
card->discipline.ccwgdriver->remove(card->gdev);
qeth_core_free_discipline(card);
@@ -410,12 +441,12 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
rc = qeth_core_load_discipline(card, newdis);
if (rc)
- return rc;
+ goto out;
rc = card->discipline.ccwgdriver->probe(card->gdev);
- if (rc)
- return rc;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
@@ -454,13 +485,13 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
char *tmp, *curtoken;
curtoken = (char *) buf;
- if (!card) {
- rc = -EINVAL;
- goto out;
- }
+ if (!card)
+ return -EINVAL;
+ mutex_lock(&card->conf_mutex);
/* check for unknown, too, in case we do not yet know who we are */
- if (card->info.type != QETH_CARD_TYPE_OSAE &&
+ if (card->info.type != QETH_CARD_TYPE_OSD &&
+ card->info.type != QETH_CARD_TYPE_OSX &&
card->info.type != QETH_CARD_TYPE_UNKNOWN) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
@@ -491,6 +522,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
rc = ipa_rc;
}
out:
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -510,22 +542,25 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
-
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 10);
- if (i <= max_value) {
+ if (i <= max_value)
*value = i;
- } else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_dev_blkt_total_show(struct device *dev,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6a801dc3bf8e..d43f57a4ac66 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -56,7 +56,9 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
!card->info.guestlan)
return 1;
return 0;
@@ -309,6 +311,10 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id;
QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+ return;
+ }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_DBF_TEXT(TRACE, 3, "aidREC");
return;
@@ -329,6 +335,10 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+ return;
+ }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_DBF_TEXT(TRACE, 3, "kidREC");
return;
@@ -559,8 +569,10 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
"device %s: x%x\n", CARD_BUS_ID(card), rc);
}
- if ((card->info.type == QETH_CARD_TYPE_IQD) ||
- (card->info.guestlan)) {
+ if (card->info.type == QETH_CARD_TYPE_IQD ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX ||
+ card->info.guestlan) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
@@ -589,8 +601,10 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
+ if (card->info.type == QETH_CARD_TYPE_OSN ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX) {
+ QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
return -EOPNOTSUPP;
}
QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
@@ -608,7 +622,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- struct dev_addr_list *dm;
struct netdev_hw_addr *ha;
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -620,8 +633,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
return;
qeth_l2_del_all_mc(card);
spin_lock_bh(&card->mclock);
- for (dm = dev->mc_list; dm; dm = dm->next)
- qeth_l2_add_mc(card, dm->da_addr, 0);
+ netdev_for_each_mc_addr(ha, dev)
+ qeth_l2_add_mc(card, ha->addr, 0);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
@@ -886,9 +899,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
- card->dev = alloc_etherdev(0);
- break;
case QETH_CARD_TYPE_IQD:
card->dev = alloc_netdev(0, "hsi%d", ether_setup);
break;
@@ -925,6 +935,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -957,18 +968,21 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
- return 0;
+ goto out;
}
rc = -ENODEV;
goto out_remove;
} else
card->lan_online = 1;
- if (card->info.type != QETH_CARD_TYPE_OSN) {
+ if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+ (card->info.type == QETH_CARD_TYPE_OSX))
/* configure isolation level */
qeth_set_access_ctrl_online(card);
+
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ card->info.type != QETH_CARD_TYPE_OSM)
qeth_l2_process_vlans(card, 0);
- }
netif_tx_disable(card->dev);
@@ -996,6 +1010,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+out:
+ mutex_unlock(&card->conf_mutex);
return 0;
out_remove:
@@ -1008,6 +1024,7 @@ out_remove:
card->state = CARD_STATE_RECOVER;
else
card->state = CARD_STATE_DOWN;
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -1023,6 +1040,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -1041,6 +1059,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&card->conf_mutex);
return 0;
}
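
The multicast hunk above drops the removed dev->mc_list walk in favour of netdev_for_each_mc_addr(), so multicast and unicast addresses are now registered the same way. A short sketch of the rebuilt receive-list path, assuming the locking shown in this hunk and treating qeth_l2_del_all_mc()/qeth_l2_add_mc() as given by the driver:

	/* Sketch: rebuild the card's RX address lists with the hw_addr iterators. */
	static void example_set_rx_mode(struct net_device *dev)
	{
		struct qeth_card *card = dev->ml_priv;
		struct netdev_hw_addr *ha;

		qeth_l2_del_all_mc(card);		/* drop the stale entries */
		spin_lock_bh(&card->mclock);
		netdev_for_each_mc_addr(ha, dev)	/* multicast addresses */
			qeth_l2_add_mc(card, ha->addr, 0);
		netdev_for_each_uc_addr(ha, dev)	/* unicast addresses */
			qeth_l2_add_mc(card, ha->addr, 1);
		spin_unlock_bh(&card->mclock);
	}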
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fc6ca1da8b98..61adae21a464 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -54,16 +54,16 @@ int qeth_l3_set_large_send(struct qeth_card *card,
if (card->options.large_send == QETH_LARGE_SEND_TSO) {
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM;
+ NETIF_F_IP_CSUM;
} else {
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
+ NETIF_F_IP_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
rc = -EOPNOTSUPP;
}
} else {
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
+ NETIF_F_IP_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
}
return rc;
@@ -1108,6 +1108,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
}
+ if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
+ cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+ card->info.tx_csum_mask =
+ cmd->data.setassparms.data.flags_32bit;
+ QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+ }
+
return 0;
}
@@ -1536,6 +1543,28 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
return rc;
}
+static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
+{
+ int rc = 0;
+
+ if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ return rc;
+ rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+ IPA_CMD_ASS_START, 0);
+ if (rc)
+ goto err_out;
+ rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+ IPA_CMD_ASS_ENABLE, card->info.tx_csum_mask);
+ if (rc)
+ goto err_out;
+ dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
+ return rc;
+err_out:
+ dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
+ "failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+ return rc;
+}
+
static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
@@ -1578,6 +1607,7 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
qeth_l3_start_ipa_checksum(card); /* go on*/
+ qeth_l3_start_ipa_tx_checksum(card);
qeth_l3_start_ipa_tso(card); /* go on*/
return 0;
}
@@ -1929,7 +1959,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
- for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
+ list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr) {
memcpy(&addr->u.a6.addr, &ifa->addr,
@@ -2681,7 +2711,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
!card->info.guestlan)
return 1;
return 0;
@@ -2817,6 +2848,21 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
+static inline void qeth_l3_hdr_csum(struct qeth_card *card,
+ struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* tcph->check already contains the pseudo-header checksum,
+ * so just set the header flags
+ */
+ if (iph->protocol == IPPROTO_UDP)
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+}
+
static void qeth_tso_fill_header(struct qeth_card *card,
struct qeth_hdr *qhdr, struct sk_buff *skb)
{
@@ -2852,21 +2898,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
}
}
-static void qeth_tx_csum(struct sk_buff *skb)
-{
- __wsum csum;
- int offset;
-
- skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
- offset = skb->csum_start - skb_headroom(skb);
- BUG_ON(offset >= skb_headlen(skb));
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
-
- offset += skb->csum_offset;
- BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-}
-
static inline int qeth_l3_tso_elements(struct sk_buff *skb)
{
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
@@ -2923,12 +2954,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_is_gso(skb))
large_send = card->options.large_send;
- else
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
- }
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
@@ -3007,6 +3032,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
cast_type);
hdr->hdr.l3.length = new_skb->len - data_offset;
}
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ qeth_l3_hdr_csum(card, hdr, new_skb);
}
elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
@@ -3132,10 +3160,25 @@ static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
return rc;
}
+static int qeth_l3_ethtool_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (data) {
+ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ return -EPERM;
+ } else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
static const struct ethtool_ops qeth_l3_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .set_tx_csum = qeth_l3_ethtool_set_tx_csum,
.get_rx_csum = qeth_l3_ethtool_get_rx_csum,
.set_rx_csum = qeth_l3_ethtool_set_rx_csum,
.get_sg = ethtool_op_get_sg,
@@ -3206,7 +3249,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
- if (card->info.type == QETH_CARD_TYPE_OSAE) {
+ if (card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
#ifdef CONFIG_TR
@@ -3336,6 +3380,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -3367,7 +3412,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
- return 0;
+ goto out;
}
rc = -ENODEV;
goto out_remove;
@@ -3414,6 +3459,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+out:
+ mutex_unlock(&card->conf_mutex);
return 0;
out_remove:
card->use_hard_stop = 1;
@@ -3425,6 +3472,7 @@ out_remove:
card->state = CARD_STATE_RECOVER;
else
card->state = CARD_STATE_DOWN;
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -3440,6 +3488,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -3458,6 +3507,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&card->conf_mutex);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 25b3e7aae44f..fb5318b30e99 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -70,10 +70,10 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
{
enum qeth_routing_types old_route_type = route->type;
char *tmp;
- int rc;
+ int rc = 0;
tmp = strsep((char **) &buf, "\n");
-
+ mutex_lock(&card->conf_mutex);
if (!strcmp(tmp, "no_router")) {
route->type = NO_ROUTER;
} else if (!strcmp(tmp, "primary_connector")) {
@@ -87,7 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
} else if (!strcmp(tmp, "multicast_router")) {
route->type = MULTICAST_ROUTER;
} else {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (((card->state == CARD_STATE_SOFTSETUP) ||
(card->state == CARD_STATE_UP)) &&
@@ -97,7 +98,9 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
else if (prot == QETH_PROT_IPV6)
rc = qeth_l3_setrouting_v6(card);
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_route4_store(struct device *dev,
@@ -157,22 +160,26 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1))
card->options.fake_broadcast = i;
- else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
@@ -200,31 +207,35 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "local")) {
+ if (!strcmp(tmp, "local"))
card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
- return count;
- } else if (!strcmp(tmp, "all_rings")) {
+ else if (!strcmp(tmp, "all_rings"))
card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
- return count;
- } else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
@@ -251,18 +262,22 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
i = simple_strtoul(buf, &tmp, 16);
@@ -270,10 +285,11 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
card->options.macaddr_mode = i?
QETH_TR_MACADDR_CANONICAL :
QETH_TR_MACADDR_NONCANONICAL;
- else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
@@ -297,11 +313,12 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_checksum_types csum_type;
char *tmp;
- int rc;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "sw_checksumming"))
csum_type = SW_CHECKSUMMING;
@@ -309,13 +326,15 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
csum_type = HW_CHECKSUMMING;
else if (!strcmp(tmp, "no_checksumming"))
csum_type = NO_CHECKSUMMING;
- else
- return -EINVAL;
+ else {
+ rc = -EINVAL;
+ goto out;
+ }
rc = qeth_l3_set_rx_csum(card, csum_type);
- if (rc)
- return rc;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
@@ -336,7 +355,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- int ret;
+ int rc = 0;
unsigned long i;
if (!card)
@@ -345,19 +364,24 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
- ret = strict_strtoul(buf, 16, &i);
- if (ret)
- return -EINVAL;
+ rc = strict_strtoul(buf, 16, &i);
+ if (rc) {
+ rc = -EINVAL;
+ goto out;
+ }
switch (i) {
case 0:
card->options.sniffer = i;
break;
case 1:
- ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
card->options.sniffer = i;
if (card->qdio.init_pool.buf_count !=
@@ -366,11 +390,13 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
QETH_IN_BUF_COUNT_MAX);
break;
} else
- return -EPERM;
+ rc = -EPERM;
default: /* fall through */
- return -EINVAL;
+ rc = -EINVAL;
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
@@ -412,12 +438,11 @@ static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
else
return -EINVAL;
- if (card->options.large_send == type)
- return count;
- rc = qeth_l3_set_large_send(card, type);
- if (rc)
- return rc;
- return count;
+ mutex_lock(&card->conf_mutex);
+ if (card->options.large_send != type)
+ rc = qeth_l3_set_large_send(card, type);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
@@ -455,13 +480,17 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
@@ -470,10 +499,11 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
card->ipato.enabled = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.enabled = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
@@ -497,10 +527,12 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
@@ -508,10 +540,10 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
card->ipato.invert4 = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.invert4 = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
@@ -593,27 +625,28 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_ipato_entry *ipatoe;
u8 addr[16];
int mask_bits;
- int rc;
+ int rc = 0;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
- return rc;
+ goto out;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
if (!ipatoe) {
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
ipatoe->mask_bits = mask_bits;
rc = qeth_l3_add_ipato_entry(card, ipatoe);
- if (rc) {
+ if (rc)
kfree(ipatoe);
- return rc;
- }
-
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
@@ -636,15 +669,14 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
{
u8 addr[16];
int mask_bits;
- int rc;
+ int rc = 0;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
- if (rc)
- return rc;
-
- qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
-
- return count;
+ if (!rc)
+ qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
@@ -677,10 +709,12 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
@@ -688,10 +722,10 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
card->ipato.invert6 = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.invert6 = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
@@ -813,15 +847,12 @@ static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
u8 addr[16] = {0, };
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (rc)
- return rc;
-
- rc = qeth_l3_add_vipa(card, proto, addr);
- if (rc)
- return rc;
-
- return count;
+ if (!rc)
+ rc = qeth_l3_add_vipa(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
@@ -845,13 +876,12 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
u8 addr[16];
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (rc)
- return rc;
-
- qeth_l3_del_vipa(card, proto, addr);
-
- return count;
+ if (!rc)
+ qeth_l3_del_vipa(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
@@ -979,15 +1009,12 @@ static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
u8 addr[16] = {0, };
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (rc)
- return rc;
-
- rc = qeth_l3_add_rxip(card, proto, addr);
- if (rc)
- return rc;
-
- return count;
+ if (!rc)
+ rc = qeth_l3_add_rxip(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
@@ -1011,13 +1038,12 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
u8 addr[16];
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (rc)
- return rc;
-
- qeth_l3_del_rxip(card, proto, addr);
-
- return count;
+ if (!rc)
+ qeth_l3_del_rxip(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1e6183a86ce5..e331df2122f7 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -425,7 +425,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
if (zfcp_fsf_status_read(adapter->qdio)) {
- if (atomic_read(&adapter->stat_miss) >= 16) {
+ if (atomic_read(&adapter->stat_miss) >=
+ adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
NULL);
return 1;
@@ -545,6 +546,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ /* report size limit per scatter-gather segment */
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
if (!zfcp_adapter_scsi_register(adapter))
return adapter;
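
The two added lines above announce the per-segment size limit to the DMA layer by hanging an adapter-owned dma_parms structure off the ccw device. A sketch of the same effect; the dma_set_max_seg_size() call shown is the generic helper and is an assumption here, not what this patch uses:

	/* Sketch: publish the scatter-gather segment limit for the ccw device. */
	static void example_set_seg_limit(struct zfcp_adapter *adapter)
	{
		struct device *dev = &adapter->ccw_device->dev;

		adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
		dev->dma_parms = &adapter->dma_parms;

		/* once dma_parms is hooked up, the generic helper (assumption)
		 * would set the same limit: */
		dma_set_max_seg_size(dev, ZFCP_QDIO_SBALE_LEN);
	}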
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 25d9e0ae9c57..1a2db0a35737 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -254,6 +254,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
}
static const struct file_operations zfcp_cfdc_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zfcp_cfdc_dev_ioctl
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7131c7db1f04..9fa1b064893e 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -44,23 +44,6 @@ struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain */
-#define ZFCP_MAX_SBALES_PER_REQ \
- (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
- /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-
-#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
- /* max. number of (data buffer) SBALEs in largest SBAL chain
- multiplied with number of sectors per 4k block */
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
@@ -181,6 +164,7 @@ struct zfcp_adapter {
stack abort/command
completion races */
atomic_t stat_miss; /* # missing status reads*/
+ unsigned int stat_read_buf_num;
struct work_struct stat_work;
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
@@ -205,6 +189,7 @@ struct zfcp_adapter {
struct work_struct scan_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
+ struct device_dma_parameters dma_parms;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0be5e7ea2828..e3dbeda97179 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -714,7 +714,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- atomic_set(&act->adapter->stat_miss, 16);
+ atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
if (zfcp_status_read_refill(act->adapter))
return ZFCP_ERP_FAILED;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8786a79c7f8f..48a8f93b72f5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_EXT_H
@@ -143,9 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
- struct zfcp_qdio_req *, unsigned long,
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2a1cbb74b99b..6f8ab43a4856 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -400,7 +400,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_adapter *adapter = port->adapter;
int ret;
- adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
+ adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
if (!adisc)
return -ENOMEM;
@@ -493,7 +493,7 @@ static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
if (!gpn_ft)
return NULL;
- req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
+ req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
if (!req) {
kfree(gpn_ft);
gpn_ft = NULL;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b3b1d2f79398..9ac6a6e4a604 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -496,6 +496,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
if (fc_host_permanent_port_name(shost) == -1)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -640,37 +641,6 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
}
}
-static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
-{
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
- spin_lock_bh(&qdio->req_q_lock);
- if (atomic_read(&req_q->count))
- return 1;
- spin_unlock_bh(&qdio->req_q_lock);
- return 0;
-}
-
-static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
-{
- struct zfcp_adapter *adapter = qdio->adapter;
- long ret;
-
- spin_unlock_bh(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_fsf_sbal_check(qdio), 5 * HZ);
- if (ret > 0)
- return 0;
- if (!ret) {
- atomic_inc(&qdio->req_q_full);
- /* assume hanging outbound queue, try queue recovery */
- zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
- }
-
- spin_lock_bh(&qdio->req_q_lock);
- return -EIO;
-}
-
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
struct zfcp_fsf_req *req;
@@ -705,10 +675,9 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
}
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
- u32 fsf_cmd, mempool_t *pool)
+ u32 fsf_cmd, u32 sbtype,
+ mempool_t *pool)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
@@ -725,14 +694,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->qdio_req.sbal_number = 1;
- req->qdio_req.sbal_first = req_q->first;
- req->qdio_req.sbal_last = req_q->first;
- req->qdio_req.sbale_curr = 1;
-
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].addr = (void *) req->req_id;
- sbale[0].flags |= SBAL_FLAGS0_COMMAND;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
@@ -753,10 +714,11 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- sbale[1].addr = (void *) req->qtcb;
- sbale[1].length = sizeof(struct fsf_qtcb);
}
+ zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+ req->qtcb, sizeof(struct fsf_qtcb));
+
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
zfcp_fsf_req_free(req);
return ERR_PTR(-EIO);
@@ -803,24 +765,19 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
- struct qdio_buffer_element *sbale;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->qdio_req.sbale_curr = 2;
-
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
retval = -ENOMEM;
@@ -828,9 +785,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
- sbale->addr = (void *) sr_buf;
- sbale->length = sizeof(*sr_buf);
+
+ zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (retval)
@@ -907,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
struct zfcp_unit *unit)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
@@ -925,9 +882,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = unit;
req->handler = zfcp_fsf_abort_fcp_command_handler;
@@ -996,21 +951,14 @@ skip_fsfstatus:
ct->handler(ct->handler_data);
}
-static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp)
{
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
- sbale[2].addr = sg_virt(sg_req);
- sbale[2].length = sg_req->length;
- sbale[3].addr = sg_virt(sg_resp);
- sbale[3].length = sg_resp->length;
- sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-}
-
-static int zfcp_fsf_one_sbal(struct scatterlist *sg)
-{
- return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+ zfcp_qdio_set_sbale_last(qdio, q_req);
}
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
@@ -1019,35 +967,34 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
int max_sbals)
{
struct zfcp_adapter *adapter = req->adapter;
- struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
- &req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
- if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
+ if (!zfcp_qdio_sg_one_sbale(sg_req) ||
+ !zfcp_qdio_sg_one_sbale(sg_resp))
return -EOPNOTSUPP;
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
/* use single, unchained SBAL if it can hold the request */
- if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
- req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
@@ -1091,10 +1038,11 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+ SBAL_FLAGS0_TYPE_WRITE_READ, pool);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1103,7 +1051,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ, timeout);
+ ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
if (ret)
goto failed_send;
@@ -1187,10 +1135,11 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+ SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1224,16 +1173,16 @@ out:
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1242,9 +1191,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_CFDC |
@@ -1269,24 +1216,22 @@ out:
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_config_data_handler;
req->qtcb->bottom.config.feature_selection =
@@ -1320,7 +1265,6 @@ out_unlock:
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
int retval = -EIO;
@@ -1328,10 +1272,11 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1340,9 +1285,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
@@ -1368,7 +1311,6 @@ out:
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
@@ -1376,10 +1318,11 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -1389,9 +1332,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1485,17 +1426,17 @@ out:
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_port *port = erp_action->port;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1504,9 +1445,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_port_handler;
hton24(req->qtcb->bottom.support.d_id, port->d_id);
@@ -1556,16 +1495,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1574,9 +1513,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_port_handler;
req->data = erp_action->port;
@@ -1633,16 +1570,16 @@ out:
*/
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1651,9 +1588,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_wka_port_handler;
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
@@ -1688,16 +1623,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1706,9 +1641,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_wka_port_handler;
req->data = wka_port;
@@ -1782,16 +1715,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1800,9 +1733,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = erp_action->port;
req->qtcb->header.port_handle = erp_action->port->handle;
@@ -1954,17 +1885,17 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+ SBAL_FLAGS0_TYPE_READ,
adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1973,9 +1904,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
@@ -2041,16 +1970,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -2059,9 +1988,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->header.lun_handle = erp_action->unit->handle;
@@ -2289,8 +2216,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
}
+ if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+ sbtype = SBAL_FLAGS0_TYPE_WRITE;
+
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
- adapter->pool.scsi_req);
+ sbtype, adapter->pool.scsi_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -2298,7 +2228,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2323,20 +2252,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
break;
case DMA_TO_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
- sbtype = SBAL_FLAGS0_TYPE_WRITE;
break;
case DMA_BIDIRECTIONAL:
goto failed_scsi_cmnd;
}
+ get_device(&unit->dev);
+
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
+ real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd),
- FSF_MAX_SBALS_PER_REQ);
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+ if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@@ -2371,7 +2301,6 @@ out:
*/
struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
@@ -2381,10 +2310,11 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
return NULL;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+ SBAL_FLAGS0_TYPE_WRITE,
qdio->adapter->pool.scsi_req);
if (IS_ERR(req)) {
@@ -2401,9 +2331,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
@@ -2432,7 +2360,6 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_fsf_cfdc *fsf_cfdc)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
@@ -2453,10 +2380,10 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
}
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
+ req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
if (IS_ERR(req)) {
retval = -EPERM;
goto out;
@@ -2464,16 +2391,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= direction;
-
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- direction, fsf_cfdc->sg,
- FSF_MAX_SBALS_PER_REQ);
+ fsf_cfdc->sg,
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
zfcp_fsf_req_free(req);
goto out;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index b3de682b64cf..519083fd6e89 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef FSF_H
@@ -152,7 +152,12 @@
#define FSF_CLASS_3 0x00000003
/* SBAL chaining */
-#define FSF_MAX_SBALS_PER_REQ 36
+#define ZFCP_FSF_MAX_SBALS_PER_REQ 36
+
+/* max. number of (data buffer) SBALEs in the largest SBAL chain;
+ * the request ID and the QTCB occupy SBALEs 0 and 1 of the first SBAL */
+#define ZFCP_FSF_MAX_SBALES_PER_REQ \
+ (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
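A minimal expansion of the new limit, illustrative only and assuming QDIO_MAX_ELEMENTS_PER_BUFFER is 16 so that ZFCP_QDIO_MAX_SBALES_PER_SBAL is 15:

	/* illustrative expansion, not part of the patch */
	ZFCP_FSF_MAX_SBALES_PER_REQ = 36 * 15 - 2 = 538 data SBALEs per request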
@@ -361,7 +366,7 @@ struct fsf_qtcb_bottom_config {
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
- u8 res1[2];
+ u16 status_read_buf_num;
u16 timer_interval;
u8 res2[9];
u8 s_id[3];
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbfa312a7f50..28117e130e2c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -151,8 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
@@ -180,17 +179,16 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned int sbtype)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
- return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
+ if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
+ return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
@@ -206,62 +204,38 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
zfcp_qdio_zero_sbals(sbal, first, count);
}
-static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
- struct zfcp_qdio_req *q_req,
- unsigned int sbtype, void *start_addr,
- unsigned int total_length)
-{
- struct qdio_buffer_element *sbale;
- unsigned long remaining, length;
- void *addr;
-
- /* split segment up */
- for (addr = start_addr, remaining = total_length; remaining > 0;
- addr += length, remaining -= length) {
- sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
- if (!sbale) {
- atomic_inc(&qdio->req_q_full);
- zfcp_qdio_undo_sbals(qdio, q_req);
- return -EINVAL;
- }
-
- /* new piece must not exceed next page boundary */
- length = min(remaining,
- (PAGE_SIZE - ((unsigned long)addr &
- (PAGE_SIZE - 1))));
- sbale->addr = addr;
- sbale->length = length;
- }
- return 0;
-}
-
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype, struct scatterlist *sg,
- int max_sbals)
+ struct scatterlist *sg, int max_sbals)
{
struct qdio_buffer_element *sbale;
- int retval, bytes = 0;
+ int bytes = 0;
/* figure out last allowed SBAL */
zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
- retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
- sg_virt(sg), sg->length);
- if (retval < 0)
- return retval;
+ sbale = zfcp_qdio_sbale_next(qdio, q_req);
+ if (!sbale) {
+ atomic_inc(&qdio->req_q_full);
+ zfcp_qdio_undo_sbals(qdio, q_req);
+ return -EINVAL;
+ }
+
+ sbale->addr = sg_virt(sg);
+ sbale->length = sg->length;
+
bytes += sg->length;
}
@@ -272,6 +246,46 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
return bytes;
}
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+ struct zfcp_qdio_queue *req_q = &qdio->req_q;
+
+ spin_lock_bh(&qdio->req_q_lock);
+ if (atomic_read(&req_q->count))
+ return 1;
+ spin_unlock_bh(&qdio->req_q_lock);
+ return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+ long ret;
+
+ spin_unlock_bh(&qdio->req_q_lock);
+ ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ if (ret > 0)
+ return 0;
+ if (!ret) {
+ atomic_inc(&qdio->req_q_full);
+ /* assume hanging outbound queue, try queue recovery */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+ }
+
+ spin_lock_bh(&qdio->req_q_lock);
+ return -EIO;
+}
+
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
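The zfcp_fsf.c hunks above all switch to this helper; a minimal sketch of the caller pattern, with request setup elided and identifiers as used in this patch:

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))	/* may sleep; req_q_lock is held again on return */
		goto out;
	req = zfcp_fsf_req_create(qdio, fsf_cmd, sbtype, pool);
	/* ... fill remaining SBALEs and send the request ... */
out:
	spin_unlock_bh(&qdio->req_q_lock);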
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8cca54631e1e..138fba577b48 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -11,6 +11,14 @@
#include <asm/qdio.h>
+#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
+
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
+
/**
* struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
* @sbal: qdio buffers
@@ -49,6 +57,7 @@ struct zfcp_qdio {
/**
* struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of sbals used by this request
* @sbal_first: first sbal for this request
* @sbal_last: last sbal for this request
@@ -59,6 +68,7 @@ struct zfcp_qdio {
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_qdio_req {
+ u32 sbtype;
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
@@ -106,4 +116,98 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
q_req->sbale_curr);
}
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue; the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ unsigned long req_id, u32 sbtype, void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+ q_req->sbal_number = 1;
+ q_req->sbtype = sbtype;
+
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->addr = (void *) req_id;
+ sbale->flags |= SBAL_FLAGS0_COMMAND;
+ sbale->flags |= sbtype;
+
+ q_req->sbale_curr = 1;
+ sbale++;
+ sbale->addr = data;
+ if (likely(data))
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * @data: pointer to the data block to map into the next sbale
+ * @len: length of the data block
+ *
+ * This is only required for single sbal requests; calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
+ q_req->sbale_curr++;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->addr = data;
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ * 0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+ return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
+{
+ q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
+}
+
#endif /* ZFCP_QDIO_H */
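Taken together, these inline helpers replace the open-coded SBALE setup in zfcp_fsf.c; a minimal sketch of a single-SBAL request built under req_q_lock (buf stands in for an optional data block such as the status read buffer):

	zfcp_qdio_req_init(qdio, &req->qdio_req, req->req_id,
			   SBAL_FLAGS0_TYPE_READ, req->qtcb, sizeof(struct fsf_qtcb));
	zfcp_qdio_fill_next(qdio, &req->qdio_req, buf, sizeof(*buf));
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	/* chained requests instead use zfcp_qdio_sbals_from_sg(), with
	 * zfcp_qdio_skip_to_last_sbale() between request and response buffers */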
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 174b6d57d576..be5d2c60453d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -175,7 +175,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
@@ -200,7 +200,9 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -231,7 +233,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
@@ -240,7 +242,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
+
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -276,10 +281,13 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
+ int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
return SUCCESS;
}
@@ -669,11 +677,12 @@ struct zfcp_data zfcp_data = {
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
- .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
+ .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ,
.cmd_per_lun = 1,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
+ .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
},
};
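With one SBALE per page segment, the new scsi_host_template limits fit together; illustrative arithmetic only, assuming 4 KB pages and QDIO_MAX_ELEMENTS_PER_BUFFER == 16:

	/*
	 * sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ = 36 * 15 - 2 = 538
	 * max_sectors  = 538 * 8 sectors = 4304 * 512 bytes = 2203648 bytes
	 * 538 SBALEs   * 4096 bytes per SBALE              = 2203648 bytes
	 * dma_boundary = ZFCP_QDIO_SBALE_LEN - 1 = 4095 keeps each sg
	 * segment within one page, i.e. within one SBALE
	 */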