Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/Makefile | 3
-rw-r--r--  drivers/s390/block/dasd.c | 250
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 871
-rw-r--r--  drivers/s390/block/dasd_alias.c | 35
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 43
-rw-r--r--  drivers/s390/block/dasd_diag.c | 66
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 1297
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 49
-rw-r--r--  drivers/s390/block/dasd_eer.c | 27
-rw-r--r--  drivers/s390/block/dasd_erp.c | 21
-rw-r--r--  drivers/s390/block/dasd_fba.c | 77
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 7
-rw-r--r--  drivers/s390/block/dasd_int.h | 13
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 46
-rw-r--r--  drivers/s390/block/dasd_proc.c | 24
-rw-r--r--  drivers/s390/char/tape.h | 2
-rw-r--r--  drivers/s390/char/tape_34xx.c | 161
-rw-r--r--  drivers/s390/char/tape_3590.c | 367
-rw-r--r--  drivers/s390/char/tape_block.c | 18
-rw-r--r--  drivers/s390/char/tape_char.c | 7
-rw-r--r--  drivers/s390/char/tape_core.c | 68
-rw-r--r--  drivers/s390/char/tape_proc.c | 3
-rw-r--r--  drivers/s390/char/tape_std.c | 25
-rw-r--r--  drivers/s390/char/zcore.c | 90
-rw-r--r--  drivers/s390/cio/Makefile | 2
-rw-r--r--  drivers/s390/cio/airq.c | 6
-rw-r--r--  drivers/s390/cio/blacklist.c | 3
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 78
-rw-r--r--  drivers/s390/cio/chp.c | 6
-rw-r--r--  drivers/s390/cio/chsc.c | 7
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 4
-rw-r--r--  drivers/s390/cio/cio.c | 21
-rw-r--r--  drivers/s390/cio/crw.c | 159
-rw-r--r--  drivers/s390/cio/css.c | 62
-rw-r--r--  drivers/s390/cio/device.c | 100
-rw-r--r--  drivers/s390/cio/device.h | 3
-rw-r--r--  drivers/s390/cio/device_fsm.c | 39
-rw-r--r--  drivers/s390/cio/device_ops.c | 2
-rw-r--r--  drivers/s390/cio/qdio.h | 8
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 3
-rw-r--r--  drivers/s390/cio/qdio_main.c | 222
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 1
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 23
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 3
-rw-r--r--  drivers/s390/ebcdic.c | 246
-rw-r--r--  drivers/s390/net/Makefile | 2
-rw-r--r--  drivers/s390/net/claw.c | 485
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 5
-rw-r--r--  drivers/s390/net/ctcm_main.c | 39
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 17
-rw-r--r--  drivers/s390/net/lcs.c | 33
-rw-r--r--  drivers/s390/net/netiucv.c | 16
-rw-r--r--  drivers/s390/net/qeth_core.h | 8
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 165
-rw-r--r--  drivers/s390/net/qeth_core_offl.c | 699
-rw-r--r--  drivers/s390/net/qeth_core_offl.h | 76
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 4
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 99
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 129
-rw-r--r--  drivers/s390/s390mach.c | 538
-rw-r--r--  drivers/s390/s390mach.h | 122
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 24
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 29
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 188
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 3
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 17
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 290
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 73
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 84
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 240
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 47
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 250
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 20
-rw-r--r--  drivers/s390/sysinfo.c | 469
76 files changed, 3894 insertions, 4855 deletions
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index d0eae59bc366..95bccfd3f169 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,9 +2,6 @@
# Makefile for the S/390 specific device drivers
#
-CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-
-obj-y += s390mach.o sysinfo.o
obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 08c23a921012..0570794ccf1c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -9,6 +9,9 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -22,6 +25,7 @@
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>
+#include <asm/itcw.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd:"
@@ -221,7 +225,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
return rc;
}
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
- device->debug_area = debug_register(dev_name(&device->cdev->dev), 1, 1,
+ device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
8 * sizeof(long));
debug_register_view(device->debug_area, &debug_sprintf_view);
debug_set_level(device->debug_area, DBF_WARNING);
@@ -762,7 +766,7 @@ static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
return -EINVAL;
device = cqr->startdev;
if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
- DEV_MESSAGE(KERN_WARNING, device,
+ DBF_DEV_EVENT(DBF_WARNING, device,
" dasd_ccw_req 0x%08x magic doesn't match"
" discipline 0x%08x",
cqr->magic,
@@ -782,6 +786,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int retries, rc;
+ char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -815,10 +820,10 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
"device busy, retry later");
break;
default:
- DEV_MESSAGE(KERN_ERR, device,
- "line %d unknown RC=%d, please "
- "report to linux390@de.ibm.com",
- __LINE__, rc);
+ /* internal error 10 - unknown rc*/
+ snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
+ dev_err(&device->cdev->dev, "An error occurred in the "
+ "DASD device driver, reason=%s\n", errorstring);
BUG();
break;
}
@@ -836,6 +841,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
+ char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -843,17 +849,23 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
return rc;
device = (struct dasd_device *) cqr->startdev;
if (cqr->retries < 0) {
- DEV_MESSAGE(KERN_DEBUG, device,
- "start_IO: request %p (%02x/%i) - no retry left.",
- cqr, cqr->status, cqr->retries);
+ /* internal error 14 - start_IO run out of retries */
+ sprintf(errorstring, "14 %p", cqr);
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", errorstring);
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
cqr->startclk = get_clock();
cqr->starttime = jiffies;
cqr->retries--;
- rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
- cqr->lpm, 0);
+ if (cqr->cpmode == 1) {
+ rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
+ (long) cqr, cqr->lpm);
+ } else {
+ rc = ccw_device_start(device->cdev, cqr->cpaddr,
+ (long) cqr, cqr->lpm, 0);
+ }
switch (rc) {
case 0:
cqr->status = DASD_CQR_IN_IO;
@@ -862,11 +874,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr);
break;
case -EBUSY:
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: device busy, retry later");
break;
case -ETIMEDOUT:
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: request timeout, retry later");
break;
case -EACCES:
@@ -876,19 +888,24 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
* Do a retry with all available pathes.
*/
cqr->lpm = LPM_ANYPATH;
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: selected pathes gone,"
" retry on all pathes");
break;
case -ENODEV:
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+ "start_IO: -ENODEV device gone, retry");
+ break;
case -EIO:
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
- "start_IO: device gone, retry");
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+ "start_IO: -EIO device gone, retry");
break;
default:
- DEV_MESSAGE(KERN_ERR, device,
- "line %d unknown RC=%d, please report"
- " to linux390@de.ibm.com", __LINE__, rc);
+ /* internal error 11 - unknown rc */
+ snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
BUG();
break;
}
@@ -945,7 +962,7 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
return;
cqr = (struct dasd_ccw_req *) intparm;
if (cqr->status != DASD_CQR_IN_IO) {
- MESSAGE(KERN_DEBUG,
+ DBF_EVENT(DBF_DEBUG,
"invalid status in handle_killed_request: "
"bus_id %s, status %02x",
dev_name(&cdev->dev), cqr->status);
@@ -956,8 +973,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
if (device == NULL ||
device != dasd_device_from_cdev_locked(cdev) ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
- dev_name(&cdev->dev));
+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
+ "bus_id %s", dev_name(&cdev->dev));
return;
}
@@ -996,11 +1013,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
case -EIO:
break;
case -ETIMEDOUT:
- printk(KERN_WARNING"%s(%s): request timed out\n",
+ DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
__func__, dev_name(&cdev->dev));
break;
default:
- printk(KERN_WARNING"%s(%s): unknown error %ld\n",
+ DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
__func__, dev_name(&cdev->dev), PTR_ERR(irb));
}
dasd_handle_killed_request(cdev, intparm);
@@ -1009,15 +1026,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
now = get_clock();
- DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
- dev_name(&cdev->dev), ((irb->scsw.cmd.cstat << 8) |
- irb->scsw.cmd.dstat), (unsigned int) intparm);
-
/* check for unsolicited interrupts */
cqr = (struct dasd_ccw_req *) intparm;
- if (!cqr || ((irb->scsw.cmd.cc == 1) &&
- (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
+ if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
+ (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
+ (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
if (cqr && cqr->status == DASD_CQR_IN_IO)
cqr->status = DASD_CQR_QUEUED;
device = dasd_device_from_cdev_locked(cdev);
@@ -1033,14 +1046,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
device = (struct dasd_device *) cqr->startdev;
if (!device ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
- dev_name(&cdev->dev));
+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
+ "bus_id %s", dev_name(&cdev->dev));
return;
}
/* Check for clear pending */
if (cqr->status == DASD_CQR_CLEAR_PENDING &&
- irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
+ scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
@@ -1048,19 +1061,17 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
- /* check status - the request might have been killed by dyn detach */
+ /* check status - the request might have been killed by dyn detach */
if (cqr->status != DASD_CQR_IN_IO) {
- MESSAGE(KERN_DEBUG,
- "invalid status: bus_id %s, status %02x",
- dev_name(&cdev->dev), cqr->status);
+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
+ "status %02x", dev_name(&cdev->dev), cqr->status);
return;
}
- DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
- ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
+
next = NULL;
expires = 0;
- if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
- irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
+ if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+ scsw_cstat(&irb->scsw) == 0) {
/* request was completed successfully */
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
@@ -1071,18 +1082,23 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
} else { /* error */
memcpy(&cqr->irb, irb, sizeof(struct irb));
+ /* log sense for every failed I/O to s390 debugfeature */
+ dasd_log_sense_dbf(cqr, irb);
if (device->features & DASD_FEATURE_ERPLOG) {
dasd_log_sense(cqr, irb);
}
+
/*
* If we don't want complex ERP for this request, then just
* reset this and retry it in the fastpath
*/
if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
- DEV_MESSAGE(KERN_DEBUG, device,
- "default ERP in fastpath (%i retries left)",
- cqr->retries);
+ if (cqr->lpm == LPM_ANYPATH)
+ DBF_DEV_EVENT(DBF_DEBUG, device,
+ "default ERP in fastpath "
+ "(%i retries left)",
+ cqr->retries);
cqr->lpm = LPM_ANYPATH;
cqr->status = DASD_CQR_QUEUED;
next = cqr;
@@ -1093,10 +1109,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
(!device->stopped)) {
if (device->discipline->start_IO(next) == 0)
expires = next->expires;
- else
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
- "Interrupt fastpath "
- "failed!");
}
if (expires != 0)
dasd_device_set_timer(device, expires);
@@ -1169,6 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
struct dasd_block *block;
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
+ char errorstring[ERRORLENGTH];
list_for_each_safe(l, n, final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
@@ -1189,10 +1202,11 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
cqr->status = DASD_CQR_TERMINATED;
break;
default:
- DEV_MESSAGE(KERN_ERR, device,
- "wrong cqr status in __dasd_process_final_queue "
- "for cqr %p, status %x",
- cqr, cqr->status);
+ /* internal error 12 - wrong cqr status*/
+ snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
BUG();
}
if (cqr->callback != NULL)
@@ -1217,18 +1231,17 @@ static void __dasd_device_check_expire(struct dasd_device *device)
(time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
- DEV_MESSAGE(KERN_ERR, device,
- "internal error - timeout (%is) expired "
- "for cqr %p, termination failed, "
- "retrying in 5s",
- (cqr->expires/HZ), cqr);
+ dev_err(&device->cdev->dev,
+ "cqr %p timed out (%is) but cannot be "
+ "ended, retrying in 5 s\n",
+ cqr, (cqr->expires/HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
- DEV_MESSAGE(KERN_ERR, device,
- "internal error - timeout (%is) expired "
- "for cqr %p (%i retries left)",
- (cqr->expires/HZ), cqr, cqr->retries);
+ dev_err(&device->cdev->dev,
+ "cqr %p timed out (%is), %i retries "
+ "remaining\n", cqr, (cqr->expires/HZ),
+ cqr->retries);
}
}
}
@@ -1290,10 +1303,9 @@ int dasd_flush_device_queue(struct dasd_device *device)
rc = device->discipline->term_IO(cqr);
if (rc) {
/* unable to terminate requeust */
- DEV_MESSAGE(KERN_ERR, device,
- "dasd flush ccw_queue is unable "
- " to terminate request %p",
- cqr);
+ dev_err(&device->cdev->dev,
+ "Flushing the DASD request queue "
+ "failed for request %p\n", cqr);
/* stop flush processing */
goto finished;
}
@@ -1537,10 +1549,9 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
/* request in IO - terminate IO and release again */
rc = device->discipline->term_IO(cqr);
if (rc) {
- DEV_MESSAGE(KERN_ERR, device,
- "dasd_cancel_req is unable "
- " to terminate request %p, rc = %d",
- cqr, rc);
+ dev_err(&device->cdev->dev,
+ "Cancelling request %p failed with rc=%d\n",
+ cqr, rc);
} else {
cqr->stopclk = get_clock();
rc = 1;
@@ -1617,7 +1628,7 @@ static inline void __dasd_block_process_erp(struct dasd_block *block,
if (cqr->status == DASD_CQR_DONE)
DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
else
- DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
+ dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
erp_fn = device->discipline->erp_postaction(cqr);
erp_fn(cqr);
}
@@ -1991,8 +2002,11 @@ static void dasd_setup_queue(struct dasd_block *block)
blk_queue_max_sectors(block->request_queue, max);
blk_queue_max_phys_segments(block->request_queue, -1L);
blk_queue_max_hw_segments(block->request_queue, -1L);
- blk_queue_max_segment_size(block->request_queue, -1L);
- blk_queue_segment_boundary(block->request_queue, -1L);
+ /* with page sized segments we can translate each segment into
+ * one idaw/tidaw
+ */
+ blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
+ blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}
@@ -2043,8 +2057,9 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
}
if (dasd_probeonly) {
- DEV_MESSAGE(KERN_INFO, base, "%s",
- "No access to device due to probeonly mode");
+ dev_info(&base->cdev->dev,
+ "Accessing the DASD failed because it is in "
+ "probeonly mode\n");
rc = -EPERM;
goto out;
}
@@ -2101,7 +2116,8 @@ dasd_device_operations = {
.owner = THIS_MODULE,
.open = dasd_open,
.release = dasd_release,
- .locked_ioctl = dasd_ioctl,
+ .ioctl = dasd_ioctl,
+ .compat_ioctl = dasd_ioctl,
.getgeo = dasd_getgeo,
};
@@ -2143,14 +2159,14 @@ int dasd_generic_probe(struct ccw_device *cdev,
ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
if (ret) {
- printk(KERN_WARNING
+ DBF_EVENT(DBF_WARNING,
"dasd_generic_probe: could not set ccw-device options "
"for %s\n", dev_name(&cdev->dev));
return ret;
}
ret = dasd_add_sysfs_files(cdev);
if (ret) {
- printk(KERN_WARNING
+ DBF_EVENT(DBF_WARNING,
"dasd_generic_probe: could not add sysfs entries "
"for %s\n", dev_name(&cdev->dev));
return ret;
@@ -2166,9 +2182,7 @@ int dasd_generic_probe(struct ccw_device *cdev,
(dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
ret = ccw_device_set_online(cdev);
if (ret)
- printk(KERN_WARNING
- "dasd_generic_probe: could not initially "
- "online ccw-device %s; return code: %d\n",
+ pr_warning("%s: Setting the DASD online failed with rc=%d\n",
dev_name(&cdev->dev), ret);
return 0;
}
@@ -2232,10 +2246,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
discipline = base_discipline;
if (device->features & DASD_FEATURE_USEDIAG) {
if (!dasd_diag_discipline_pointer) {
- printk (KERN_WARNING
- "dasd_generic couldn't online device %s "
- "- discipline DIAG not available\n",
- dev_name(&cdev->dev));
+ pr_warning("%s Setting the DASD online failed because "
+ "of missing DIAG discipline\n",
+ dev_name(&cdev->dev));
dasd_delete_device(device);
return -ENODEV;
}
@@ -2256,10 +2269,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* check_device will allocate block device if necessary */
rc = discipline->check_device(device);
if (rc) {
- printk (KERN_WARNING
- "dasd_generic couldn't online device %s "
- "with discipline %s rc=%i\n",
- dev_name(&cdev->dev), discipline->name, rc);
+ pr_warning("%s Setting the DASD online with discipline %s "
+ "failed with rc=%i\n",
+ dev_name(&cdev->dev), discipline->name, rc);
module_put(discipline->owner);
module_put(base_discipline->owner);
dasd_delete_device(device);
@@ -2268,9 +2280,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN) {
- printk (KERN_WARNING
- "dasd_generic discipline not found for %s\n",
- dev_name(&cdev->dev));
+ pr_warning("%s Setting the DASD online failed because of a "
+ "missing discipline\n", dev_name(&cdev->dev));
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
@@ -2314,13 +2325,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
- printk(KERN_WARNING "Can't offline dasd "
- "device with open count = %i.\n",
- open_count);
+ pr_warning("%s: The DASD cannot be set offline "
+ "with open count %i\n",
+ dev_name(&cdev->dev), open_count);
else
- printk(KERN_WARNING "%s",
- "Can't offline dasd device due "
- "to internal use\n");
+ pr_warning("%s: The DASD cannot be set offline "
+ "while it is in use\n",
+ dev_name(&cdev->dev));
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_put_device(device);
return -EBUSY;
@@ -2352,6 +2363,7 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
ret = 0;
switch (event) {
case CIO_GONE:
+ case CIO_BOXED:
case CIO_NO_PATH:
/* First of all call extended error reporting. */
dasd_eer_write(device, NULL, DASD_EER_NOPATH);
@@ -2393,8 +2405,10 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "Could not allocate RDC request");
+ /* internal error 13 - Allocating the RDC request failed*/
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "13");
return cqr;
}
@@ -2431,6 +2445,40 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
+/*
+ * In command mode and transport mode we need to look for sense
+ * data in different places. The sense data itself is always
+ * an array of 32 bytes, so we can unify the sense data access
+ * for both modes.
+ */
+char *dasd_get_sense(struct irb *irb)
+{
+ struct tsb *tsb = NULL;
+ char *sense = NULL;
+
+ if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
+ if (irb->scsw.tm.tcw)
+ tsb = tcw_get_tsb((struct tcw *)(unsigned long)
+ irb->scsw.tm.tcw);
+ if (tsb && tsb->length == 64 && tsb->flags)
+ switch (tsb->flags & 0x07) {
+ case 1: /* tsa_iostat */
+ sense = tsb->tsa.iostat.sense;
+ break;
+ case 2: /* tsa_ddpc */
+ sense = tsb->tsa.ddpc.sense;
+ break;
+ default:
+ /* currently we don't use interrogate data */
+ break;
+ }
+ } else if (irb->esw.esw0.erw.cons) {
+ sense = irb->ecw;
+ }
+ return sense;
+}
+EXPORT_SYMBOL_GPL(dasd_get_sense);
+
static int __init dasd_init(void)
{
int rc;
@@ -2472,7 +2520,7 @@ static int __init dasd_init(void)
return 0;
failed:
- MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
+ pr_info("The DASD device driver could not be initialized\n");
dasd_exit();
return rc;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index d82aad5224f0..27991b692056 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,6 +7,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/idals.h>
@@ -75,7 +77,7 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
struct dasd_device *device = erp->startdev;
unsigned long flags;
- DEV_MESSAGE(KERN_INFO, device,
+ DBF_DEV_EVENT(DBF_INFO, device,
"blocking request queue for %is", expires/HZ);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
@@ -114,9 +116,9 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
} else {
/* issue a message and wait for 'device ready' interrupt */
- DEV_MESSAGE(KERN_ERR, device, "%s",
+ dev_err(&device->cdev->dev,
"is offline or not installed - "
- "INTERVENTION REQUIRED!!");
+ "INTERVENTION REQUIRED!!\n");
dasd_3990_erp_block_queue(erp, 60*HZ);
}
@@ -158,7 +160,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
if ((erp->lpm & opm) != 0x00) {
- DEV_MESSAGE(KERN_DEBUG, device,
+ DBF_DEV_EVENT(DBF_WARNING, device,
"try alternate lpm=%x (lpum=%x / opm=%x)",
erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
@@ -166,10 +168,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
} else {
- DEV_MESSAGE(KERN_ERR, device,
- "No alternate channel path left (lpum=%x / "
- "opm=%x) -> permanent error",
- erp->irb.esw.esw0.sublog.lpum, opm);
+ dev_err(&device->cdev->dev,
+ "The DASD cannot be reached on any path (lpum=%x"
+ "/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
/* post request with permanent error */
erp->status = DASD_CQR_FAILED;
@@ -204,8 +205,8 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
sizeof(struct DCTL_data),
device);
if (IS_ERR(dctl_cqr)) {
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "Unable to allocate DCTL-CQR");
+ dev_err(&device->cdev->dev,
+ "Unable to allocate DCTL-CQR\n");
erp->status = DASD_CQR_FAILED;
return erp;
}
@@ -294,7 +295,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
/* interrupt (this enables easier enqueing of the cqr) */
if (erp->function != dasd_3990_erp_action_4) {
- DEV_MESSAGE(KERN_INFO, device, "%s",
+ DBF_DEV_EVENT(DBF_INFO, device, "%s",
"dasd_3990_erp_action_4: first time retry");
erp->retries = 256;
@@ -303,7 +304,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
} else {
if (sense && (sense[25] == 0x1D)) { /* state change pending */
- DEV_MESSAGE(KERN_INFO, device,
+ DBF_DEV_EVENT(DBF_INFO, device,
"waiting for state change pending "
"interrupt, %d retries left",
erp->retries);
@@ -311,15 +312,14 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
dasd_3990_erp_block_queue(erp, 30*HZ);
} else if (sense && (sense[25] == 0x1E)) { /* busy */
- DEV_MESSAGE(KERN_INFO, device,
+ DBF_DEV_EVENT(DBF_INFO, device,
"busy - redriving request later, "
"%d retries left",
erp->retries);
dasd_3990_erp_block_queue(erp, HZ);
} else {
-
/* no state change pending - retry */
- DEV_MESSAGE (KERN_INFO, device,
+ DBF_DEV_EVENT(DBF_INFO, device,
"redriving request immediately, "
"%d retries left",
erp->retries);
@@ -384,6 +384,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
struct dasd_device *device = erp->startdev;
char msg_format = (sense[7] & 0xF0);
char msg_no = (sense[7] & 0x0F);
+ char errorstring[ERRORLENGTH];
switch (msg_format) {
case 0x00: /* Format 0 - Program or System Checks */
@@ -394,95 +395,97 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
case 0x00: /* No Message */
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Invalid Command");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Invalid Command\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Invalid Command "
- "Sequence");
+ "Sequence\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - CCW Count less than "
- "required");
+ "required\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Invalid Parameter");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Invalid Parameter\n");
break;
case 0x05:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Diagnostic of Sepecial"
- " Command Violates File Mask");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Diagnostic of Special"
+ " Command Violates File Mask\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Channel Returned with "
- "Incorrect retry CCW");
+ "Incorrect retry CCW\n");
break;
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Reset Notification");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reset Notification\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Storage Path Restart");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Storage Path Restart\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device,
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Channel requested "
- "... %02x", sense[8]);
+ "... %02x\n", sense[8]);
break;
case 0x0B:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Invalid Defective/"
- "Alternate Track Pointer");
+ "Alternate Track Pointer\n");
break;
case 0x0C:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - DPS Installation "
- "Check");
+ "Check\n");
break;
case 0x0E:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Command Invalid on "
- "Secondary Address");
+ "Secondary Address\n");
break;
case 0x0F:
- DEV_MESSAGE(KERN_WARNING, device,
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Status Not As "
- "Required: reason %02x", sense[8]);
+ "Required: reason %02x\n",
+ sense[8]);
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Reseved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reserved\n");
}
} else {
switch (msg_no) {
case 0x00: /* No Message */
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Device Error Source");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Device Error "
+ "Source\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reserved\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device,
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Device Fenced - "
- "device = %02x", sense[4]);
+ "device = %02x\n", sense[4]);
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 0 - Data Pinned for "
- "Device");
+ "Device\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 0 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reserved\n");
}
}
break;
@@ -492,348 +495,352 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
case 0x00: /* No Message */
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Device Status 1 not as "
- "expected");
+ "expected\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Index missing");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Index missing\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Interruption cannot be reset");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Interruption cannot be "
+ "reset\n");
break;
case 0x05:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Device did not respond to "
- "selection");
+ "selection\n");
break;
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Device check-2 error or Set "
- "Sector is not complete");
+ "Sector is not complete\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Head address does not "
- "compare");
+ "compare\n");
break;
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Device status 1 not valid");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device status 1 not valid\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Device not ready");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device not ready\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Track physical address did "
- "not compare");
+ "not compare\n");
break;
case 0x0B:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Missing device address bit");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Missing device address bit\n");
break;
case 0x0C:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Drive motor switch is off");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Drive motor switch is off\n");
break;
case 0x0D:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Seek incomplete");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Seek incomplete\n");
break;
case 0x0E:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Cylinder address did not "
- "compare");
+ "compare\n");
break;
case 0x0F:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 1 - Offset active cannot be "
- "reset");
+ "reset\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 1 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Reserved\n");
}
break;
case 0x20: /* Format 2 - 3990 Equipment Checks */
switch (msg_no) {
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 2 - 3990 check-2 error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - 3990 check-2 error\n");
break;
case 0x0E:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 2 - Support facility errors");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - Support facility errors\n");
break;
case 0x0F:
- DEV_MESSAGE(KERN_WARNING, device,
- "FORMAT 2 - Microcode detected error %02x",
- sense[8]);
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - Microcode detected error "
+ "%02x\n",
+ sense[8]);
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 2 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - Reserved\n");
}
break;
case 0x30: /* Format 3 - 3990 Control Checks */
switch (msg_no) {
case 0x0F:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 3 - Allegiance terminated");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 3 - Allegiance terminated\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 3 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 3 - Reserved\n");
}
break;
case 0x40: /* Format 4 - Data Checks */
switch (msg_no) {
case 0x00:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - Home address area error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Home address area error\n");
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - Count area error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Count area error\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - Key area error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Key area error\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - Data area error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Data area error\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in home address "
- "area");
+ "area\n");
break;
case 0x05:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in count address "
- "area");
+ "area\n");
break;
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - No sync byte in key area");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in key area\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - No sync byte in data area");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in data area\n");
break;
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - Home address area error; "
- "offset active");
+ "offset active\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - Count area error; offset "
- "active");
+ "active\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - Key area error; offset "
- "active");
+ "active\n");
break;
case 0x0B:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - Data area error; "
- "offset active");
+ "offset active\n");
break;
case 0x0C:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in home "
- "address area; offset active");
+ "address area; offset active\n");
break;
case 0x0D:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - No syn byte in count "
- "address area; offset active");
+ "address area; offset active\n");
break;
case 0x0E:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in key area; "
- "offset active");
+ "offset active\n");
break;
case 0x0F:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 4 - No syn byte in data area; "
- "offset active");
+ "offset active\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 4 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Reserved\n");
}
break;
case 0x50: /* Format 5 - Data Check with displacement information */
switch (msg_no) {
case 0x00:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the "
- "home address area");
+ "home address area\n");
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 5 - Data Check in the count area");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the count "
+ "area\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 5 - Data Check in the key area");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the key area\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 5 - Data Check in the data area");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the data "
+ "area\n");
break;
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the "
- "home address area; offset active");
+ "home address area; offset active\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the count area; "
- "offset active");
+ "offset active\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the key area; "
- "offset active");
+ "offset active\n");
break;
case 0x0B:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the data area; "
- "offset active");
+ "offset active\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 5 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Reserved\n");
}
break;
case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
switch (msg_no) {
case 0x00:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel A");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel A\n");
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel B");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel B\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel C");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel C\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel D");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel D\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel E");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel E\n");
break;
case 0x05:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel F");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel F\n");
break;
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel G");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel G\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Overrun on channel H");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel H\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 6 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Reserved\n");
}
break;
case 0x70: /* Format 7 - Device Connection Control Checks */
switch (msg_no) {
case 0x00:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - RCC initiated by a connection "
- "check alert");
+ "check alert\n");
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - RCC 1 sequence not "
- "successful");
+ "successful\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - RCC 1 and RCC 2 sequences not "
- "successful");
+ "successful\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid tag-in during "
- "selection sequence");
+ "selection sequence\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 7 - extra RCC required");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - extra RCC required\n");
break;
case 0x05:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid DCC selection "
- "response or timeout");
+ "response or timeout\n");
break;
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Missing end operation; device "
- "transfer complete");
+ "transfer complete\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Missing end operation; device "
- "transfer incomplete");
+ "transfer incomplete\n");
break;
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid tag-in for an "
- "immediate command sequence");
+ "immediate command sequence\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid tag-in for an "
- "extended command sequence");
+ "extended command sequence\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - 3990 microcode time out when "
- "stopping selection");
+ "stopping selection\n");
break;
case 0x0B:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - No response to selection "
- "after a poll interruption");
+ "after a poll interruption\n");
break;
case 0x0C:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - Permanent path error (DASD "
- "controller not available)");
+ "controller not available)\n");
break;
case 0x0D:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 7 - DASD controller not available"
- " on disconnected command chain");
+ " on disconnected command chain\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 7 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Reserved\n");
}
break;
@@ -841,52 +848,52 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
switch (msg_no) {
case 0x00: /* No Message */
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - Error correction code "
- "hardware fault");
+ "hardware fault\n");
break;
case 0x03:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - Unexpected end operation "
- "response code");
+ "response code\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - End operation with transfer "
- "count not zero");
+ "count not zero\n");
break;
case 0x05:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - End operation with transfer "
- "count zero");
+ "count zero\n");
break;
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - DPS checks after a system "
- "reset or selective reset");
+ "reset or selective reset\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 8 - DPS cannot be filled");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - DPS cannot be filled\n");
break;
case 0x08:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - Short busy time-out during "
- "device selection");
+ "device selection\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - DASD controller failed to "
- "set or reset the long busy latch");
+ "set or reset the long busy latch\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 8 - No interruption from device "
- "during a command chain");
+ "during a command chain\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 8 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - Reserved\n");
}
break;
@@ -895,97 +902,100 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
case 0x00:
break; /* No Message */
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 9 - Device check-2 error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Device check-2 error\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 9 - Head address did not compare");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Head address did not "
+ "compare\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 9 - Track physical address did "
- "not compare while oriented");
+ "not compare while oriented\n");
break;
case 0x0E:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT 9 - Cylinder address did not "
- "compare");
+ "compare\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT 9 - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Reserved\n");
}
break;
case 0xF0: /* Format F - Cache Storage Checks */
switch (msg_no) {
case 0x00:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Operation Terminated");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Operation Terminated\n");
break;
case 0x01:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Subsystem Processing Error");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Subsystem Processing Error\n");
break;
case 0x02:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT F - Cache or nonvolatile storage "
- "equipment failure");
+ "equipment failure\n");
break;
case 0x04:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Caching terminated");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Caching terminated\n");
break;
case 0x06:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT F - Cache fast write access not "
- "authorized");
+ "authorized\n");
break;
case 0x07:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Track format incorrect");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Track format incorrect\n");
break;
case 0x09:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Caching reinitiated");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Caching reinitiated\n");
break;
case 0x0A:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT F - Nonvolatile storage "
- "terminated");
+ "terminated\n");
break;
case 0x0B:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Volume is suspended duplex");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Volume is suspended duplex\n");
/* call extended error reporting (EER) */
dasd_eer_write(device, erp->refers,
DASD_EER_PPRCSUSPEND);
break;
case 0x0C:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - Subsystem status connot be "
- "determined");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Subsystem status cannot be "
+ "determined\n");
break;
case 0x0D:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ dev_warn(&device->cdev->dev,
"FORMAT F - Caching status reset to "
- "default");
+ "default\n");
break;
case 0x0E:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT F - DASD Fast Write inhibited");
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - DASD Fast Write inhibited\n");
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "FORMAT D - Reserved");
+ dev_warn(&device->cdev->dev,
+ "FORMAT D - Reserved\n");
}
break;
- default: /* unknown message format - should not happen */
- DEV_MESSAGE (KERN_WARNING, device,
- "unknown message format %02x",
- msg_format);
+ default: /* unknown message format - should not happen
+ internal error 03 - unknown message format */
+ snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
break;
} /* end switch message format */
@@ -1015,7 +1025,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
/* env data present (ACTION 10 - retry should work) */
if (sense[2] & SNS2_ENV_DATA_PRESENT) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Command Reject - environmental data present");
dasd_3990_handle_env_data(erp, sense);
@@ -1023,9 +1033,10 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
erp->retries = 5;
} else {
- /* fatal error - set status to FAILED */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "Command Reject - Fatal error");
+ /* fatal error - set status to FAILED
+ internal error 09 - Command Reject */
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "09");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1061,7 +1072,7 @@ dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
} else {
/* issue a message and wait for 'device ready' interrupt */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"bus out parity error or BOPC requested by "
"channel");
@@ -1093,21 +1104,19 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_equip_check;
if (sense[1] & SNS1_WRITE_INHIBITED) {
+ dev_info(&device->cdev->dev,
+ "Write inhibited path encountered\n");
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
- "Write inhibited path encountered");
-
- /* vary path offline */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "Path should be varied off-line. "
- "This is not implemented yet \n - please report "
- "to linux390@de.ibm.com");
+ /* vary path offline
+ internal error 04 - Path should be varied off-line.*/
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "04");
erp = dasd_3990_erp_action_1(erp);
} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Equipment Check - " "environmental data present");
dasd_3990_handle_env_data(erp, sense);
@@ -1116,7 +1125,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
} else if (sense[1] & SNS1_PERM_ERR) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Equipment Check - retry exhausted or "
"undesirable");
@@ -1125,7 +1134,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
} else {
/* all other equipment checks - Action 5 */
/* rest is done when retries == 0 */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Equipment check or processing error");
erp = dasd_3990_erp_action_5(erp);
@@ -1156,9 +1165,9 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
/* issue message that the data has been corrected */
- DEV_MESSAGE(KERN_EMERG, device, "%s",
+ dev_emerg(&device->cdev->dev,
"Data recovered during retry with PCI "
- "fetch mode active");
+ "fetch mode active\n");
/* not possible to handle this situation in Linux */
panic("No way to inform application about the possibly "
@@ -1166,7 +1175,7 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Uncorrectable data check recovered secondary "
"addr of duplex pair");
@@ -1174,7 +1183,7 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
} else if (sense[1] & SNS1_PERM_ERR) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Uncorrectable data check with internal "
"retry exhausted");
@@ -1182,7 +1191,7 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
} else {
/* all other data checks */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Uncorrectable data check with retry count "
"exhausted...");
@@ -1212,7 +1221,7 @@ dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_overrun;
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Overrun - service overrun or overrun"
" error requested by channel");
@@ -1243,7 +1252,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
if (sense[2] & SNS2_ENV_DATA_PRESENT) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Track format error when destaging or "
"staging data");
@@ -1252,8 +1261,10 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_4(erp, sense);
} else {
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "Invalid Track Format - Fatal error");
+ /* internal error 06 - The track format is not valid*/
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "06");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1279,8 +1290,8 @@ dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
struct dasd_device *device = default_erp->startdev;
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "End-of-Cylinder - must never happen");
+ dev_err(&device->cdev->dev,
+ "The cylinder data for accessing the DASD is inconsistent\n");
/* implement action 7 - BUG */
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
@@ -1306,7 +1317,7 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_env_data;
- DEV_MESSAGE(KERN_DEBUG, device, "%s", "Environmental data present");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Environmental data present");
dasd_3990_handle_env_data(erp, sense);
@@ -1339,8 +1350,8 @@ dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
struct dasd_device *device = default_erp->startdev;
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "No Record Found - Fatal error ");
+ dev_err(&device->cdev->dev,
+ "The specified record was not found\n");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
@@ -1365,7 +1376,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
struct dasd_device *device = erp->startdev;
- DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
+ dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
+ "a hardware error\n");
return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
@@ -1394,7 +1406,7 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
if (cqr->block &&
(cqr->block->base != cqr->startdev)) {
if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
- DEV_MESSAGE(KERN_ERR, cqr->startdev,
+ DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
"ERP on alias device for request %p,"
" recover on base device %s", cqr,
dev_name(&cqr->block->base->cdev->dev));
@@ -1511,7 +1523,7 @@ dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
erp->retries = 256;
erp->function = dasd_3990_erp_action_10_32;
- DEV_MESSAGE(KERN_DEBUG, device, "%s", "Perform logging requested");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Perform logging requested");
return erp;
@@ -1549,7 +1561,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
char *LO_data; /* LO_eckd_data_t */
struct ccw1 *ccw, *oldccw;
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Write not finished because of unexpected condition");
default_erp->function = dasd_3990_erp_action_1B_32;
@@ -1561,10 +1573,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
cqr = cqr->refers;
}
+ if (scsw_is_tm(&cqr->irb.scsw)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "32 bit sense, action 1B is not defined"
+ " in transport mode - just retry");
+ return default_erp;
+ }
+
/* for imprecise ending just do default erp */
if (sense[1] & 0x01) {
-
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Imprecise ending is set - just retry");
return default_erp;
@@ -1575,8 +1593,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
cpa = default_erp->refers->irb.scsw.cmd.cpa;
if (cpa == 0) {
-
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Unable to determine address of the CCW "
"to be restarted");
@@ -1590,7 +1607,9 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
sizeof(struct LO_eckd_data), device);
if (IS_ERR(erp)) {
- DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
+ /* internal error 01 - Unable to allocate ERP */
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "01");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
@@ -1599,7 +1618,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
oldccw = cqr->cpaddr;
if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
PFX_data = cqr->data;
- memcpy(DE_data, &PFX_data->define_extend,
+ memcpy(DE_data, &PFX_data->define_extent,
sizeof(struct DE_eckd_data));
} else
memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
@@ -1608,10 +1627,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
LO_data = erp->data + sizeof(struct DE_eckd_data);
if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
-
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "BUG - this should not happen");
-
+ /* should not */
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
@@ -1701,7 +1717,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
char *LO_data; /* struct LO_eckd_data */
struct ccw1 *ccw;
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Write not finished because of unexpected condition"
" - follow on");
@@ -1712,10 +1728,16 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
cqr = cqr->refers;
}
+ if (scsw_is_tm(&cqr->irb.scsw)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "32 bit sense, action 1B, update,"
+ " in transport mode - just retry");
+ return previous_erp;
+ }
+
/* for imprecise ending just do default erp */
if (sense[1] & 0x01) {
-
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Imprecise ending is set - just retry");
previous_erp->status = DASD_CQR_FILLED;
@@ -1728,10 +1750,10 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) {
-
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
- "Unable to determine address of the CCW "
- "to be restarted");
+ /* internal error 02 -
+ Unable to determine address of the CCW to be restarted */
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "02");
previous_erp->status = DASD_CQR_FAILED;
@@ -1744,10 +1766,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
LO_data = erp->data + sizeof(struct DE_eckd_data);
if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
-
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "BUG - this should not happen");
-
+ /* should not happen */
previous_erp->status = DASD_CQR_FAILED;
return previous_erp;
@@ -1935,14 +1954,13 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
- /* set to suspended duplex state then restart */
+ /* set to suspended duplex state then restart
+ internal error 05 - Set device to suspended duplex state
+ should be done */
struct dasd_device *device = erp->startdev;
-
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "Set device to suspended duplex state should be "
- "done!\n"
- "This is not implemented yet (for compound ERP)"
- " - please report to linux390@de.ibm.com");
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "05");
}
@@ -2012,15 +2030,14 @@ dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
{
/* print message according to log or message to operator mode */
if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
-
/* print SIM SRC from RefCode */
- DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: "
- "%02x%02x%02x%02x", sense[22],
+ dev_err(&device->cdev->dev, "SIM - SRC: "
+ "%02x%02x%02x%02x\n", sense[22],
sense[23], sense[11], sense[12]);
} else if (sense[24] & DASD_SIM_LOG) {
/* print SIM SRC Refcode */
- DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: "
- "%02x%02x%02x%02x", sense[22],
+ dev_warn(&device->cdev->dev, "log SIM - SRC: "
+ "%02x%02x%02x%02x\n", sense[22],
sense[23], sense[11], sense[12]);
}
}
@@ -2063,14 +2080,14 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
switch (sense[25]) {
case 0x00: /* success - use default ERP for retries */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"ERP called for successful request"
" - just retry");
break;
case 0x01: /* fatal error */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "Retry not recommended - Fatal error");
+ dev_err(&device->cdev->dev,
+ "ERP failed for the DASD\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2080,13 +2097,10 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_int_req(erp);
break;
- case 0x0F: /* length mismatch during update write command */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "update write command error - should not "
- "happen;\n"
- "Please send this message together with "
- "the above sense data to linux390@de."
- "ibm.com");
+ case 0x0F: /* length mismatch during update write command
+ internal error 08 - update write command error */
+ dev_err(&device->cdev->dev, "An error occurred in the "
+ "DASD device driver, reason=%s\n", "08");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2095,13 +2109,12 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_10_32(erp, sense);
break;
- case 0x15: /* next track outside defined extend */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "next track outside defined extend - "
- "should not happen;\n"
- "Please send this message together with "
- "the above sense data to linux390@de."
- "ibm.com");
+ case 0x15: /* next track outside defined extent
+ internal error 07 - The next track is not
+ within the defined storage extent */
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "07");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2112,9 +2125,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
break;
case 0x1C: /* invalid data */
- DEV_MESSAGE(KERN_EMERG, device, "%s",
+ dev_emerg(&device->cdev->dev,
"Data recovered during retry with PCI "
- "fetch mode active");
+ "fetch mode active\n");
/* not possible to handle this situation in Linux */
panic
@@ -2123,7 +2136,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
break;
case 0x1D: /* state-change pending */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"A State change pending condition exists "
"for the subsystem or device");
@@ -2131,7 +2144,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
break;
case 0x1E: /* busy */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Busy condition exists "
"for the subsystem or device");
erp = dasd_3990_erp_action_4(erp, sense);
@@ -2171,9 +2184,9 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
- if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
+ if (scsw_cstat(&erp->refers->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK
| SCHN_STAT_CHN_CTRL_CHK)) {
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"channel or interface control check");
erp = dasd_3990_erp_action_4(erp, NULL);
}
@@ -2193,21 +2206,23 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
* erp_new contents was possibly modified
*/
static struct dasd_ccw_req *
-dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
+dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *erp_new = NULL;
- /* sense data are located in the refers record of the */
- /* already set up new ERP ! */
- char *sense = erp->refers->irb.ecw;
+ char *sense;
/* if this problem occurred on an alias retry on base */
erp_new = dasd_3990_erp_inspect_alias(erp);
if (erp_new)
return erp_new;
- /* check if no concurrent sens is available */
- if (!erp->refers->irb.esw.esw0.erw.cons)
+ /* sense data are located in the refers record of the
+ * already set up new ERP !
+ * check if concurrent sense is available
+ */
+ sense = dasd_get_sense(&erp->refers->irb);
+ if (!sense)
erp_new = dasd_3990_erp_control_check(erp);
/* distinguish between 24 and 32 byte sense data */
else if (sense[27] & DASD_SENSE_BIT_0) {
@@ -2231,7 +2246,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
* DESCRIPTION
* This function adds an additional request block (ERP) to the head of
* the given cqr (or erp).
- * This erp is initialized as an default erp (retry TIC)
+ * For a command mode cqr the erp is initialized as a default erp
+ * (retry TIC).
+ * For transport mode we make a copy of the original TCW (points to
+ * the original TCCB, TIDALs, etc.) but give it a fresh
+ * TSB so the original sense data will not be changed.
*
* PARAMETER
* cqr head of the current ERP-chain (or single cqr if
@@ -2239,25 +2258,35 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
* RETURN VALUES
* erp pointer to new ERP-chain head
*/
-static struct dasd_ccw_req *
-dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
+static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
{
struct dasd_device *device = cqr->startdev;
struct ccw1 *ccw;
-
- /* allocate additional request block */
struct dasd_ccw_req *erp;
+ int cplength, datasize;
+ struct tcw *tcw;
+ struct tsb *tsb;
+
+ if (cqr->cpmode == 1) {
+ cplength = 0;
+ datasize = sizeof(struct tcw) + sizeof(struct tsb);
+ } else {
+ cplength = 2;
+ datasize = 0;
+ }
- erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, device);
+ /* allocate additional request block */
+ erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ cplength, datasize, device);
if (IS_ERR(erp)) {
if (cqr->retries <= 0) {
- DEV_MESSAGE(KERN_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
"Unable to allocate ERP request");
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock ();
} else {
- DEV_MESSAGE (KERN_ERR, device,
+ DBF_DEV_EVENT(DBF_ERR, device,
"Unable to allocate ERP request "
"(%i retries left)",
cqr->retries);
@@ -2266,13 +2295,24 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
return cqr;
}
- /* initialize request with default TIC to current ERP/CQR */
- ccw = erp->cpaddr;
- ccw->cmd_code = CCW_CMD_NOOP;
- ccw->flags = CCW_FLAG_CC;
- ccw++;
- ccw->cmd_code = CCW_CMD_TIC;
- ccw->cda = (long)(cqr->cpaddr);
+ if (cqr->cpmode == 1) {
+ /* make a shallow copy of the original tcw but set new tsb */
+ erp->cpmode = 1;
+ erp->cpaddr = erp->data;
+ tcw = erp->data;
+ tsb = (struct tsb *) &tcw[1];
+ *tcw = *((struct tcw *)cqr->cpaddr);
+ tcw->tsb = (long)tsb;
+ } else {
+ /* initialize request with default TIC to current ERP/CQR */
+ ccw = erp->cpaddr;
+ ccw->cmd_code = CCW_CMD_NOOP;
+ ccw->flags = CCW_FLAG_CC;
+ ccw++;
+ ccw->cmd_code = CCW_CMD_TIC;
+ ccw->cda = (long)(cqr->cpaddr);
+ }
+
erp->function = dasd_3990_erp_add_erp;
erp->refers = cqr;
erp->startdev = device;
@@ -2282,7 +2322,6 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
erp->expires = 0;
erp->retries = 256;
erp->buildclk = get_clock();
-
erp->status = DASD_CQR_FILLED;
return erp;
@@ -2340,28 +2379,33 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
* match 'boolean' for match found
* returns 1 if match found, otherwise 0.
*/
-static int
-dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
+static int dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1,
+ struct dasd_ccw_req *cqr2)
{
+ char *sense1, *sense2;
if (cqr1->startdev != cqr2->startdev)
return 0;
- if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons)
- return 0;
+ sense1 = dasd_get_sense(&cqr1->irb);
+ sense2 = dasd_get_sense(&cqr2->irb);
- if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
- (cqr2->irb.esw.esw0.erw.cons == 0)) {
- if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
- SCHN_STAT_CHN_CTRL_CHK)) ==
- (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
- SCHN_STAT_CHN_CTRL_CHK)))
+ /* one request has sense data, the other not -> no match, return 0 */
+ if (!sense1 != !sense2)
+ return 0;
+ /* no sense data in both cases -> check cstat for IFCC */
+ if (!sense1 && !sense2) {
+ if ((scsw_cstat(&cqr1->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)) ==
+ (scsw_cstat(&cqr2->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)))
return 1; /* match with ifcc*/
}
/* check sense data; byte 0-2,25,27 */
- if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
- (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
- (cqr1->irb.ecw[25] == cqr2->irb.ecw[25]))) {
+ if (!(sense1 && sense2 &&
+ (memcmp(sense1, sense2, 3) == 0) &&
+ (sense1[27] == sense2[27]) &&
+ (sense1[25] == sense2[25]))) {
return 0; /* sense doesn't match */
}
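As a minimal sketch of the comparison above (helper name chosen for illustration, not part of the driver), the sense-match predicate boils down to:

	/* sketch only: the two sense buffers match if bytes 0-2, 25 and 27
	 * are equal; memcmp() comes from <linux/string.h> in kernel code */
	static int sense_matches(const char *s1, const char *s2)
	{
		return memcmp(s1, s2, 3) == 0 &&	/* bytes 0-2 */
		       s1[25] == s2[25] &&		/* byte 25 */
		       s1[27] == s2[27];		/* byte 27 */
	}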
@@ -2434,7 +2478,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
- char *sense = erp->irb.ecw;
+ char *sense = dasd_get_sense(&erp->irb);
/* check for 24 byte sense ERP */
if ((erp->function == dasd_3990_erp_bus_out) ||
@@ -2449,7 +2493,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
/* prepare erp for retry on different channel path */
erp = dasd_3990_erp_action_1(erp);
- if (!(sense[2] & DASD_SENSE_BIT_0)) {
+ if (sense && !(sense[2] & DASD_SENSE_BIT_0)) {
/* issue a Diagnostic Control command with an
* Inhibit Write subcommand */
@@ -2471,7 +2515,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
break;
}
default:
- DEV_MESSAGE(KERN_DEBUG, device,
+ DBF_DEV_EVENT(DBF_WARNING, device,
"invalid subcommand modifier 0x%x "
"for Diagnostic Control Command",
sense[25]);
@@ -2479,19 +2523,21 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
}
/* check for 32 byte sense ERP */
- } else if ((erp->function == dasd_3990_erp_compound_retry) ||
- (erp->function == dasd_3990_erp_compound_path) ||
- (erp->function == dasd_3990_erp_compound_code) ||
- (erp->function == dasd_3990_erp_compound_config)) {
+ } else if (sense &&
+ ((erp->function == dasd_3990_erp_compound_retry) ||
+ (erp->function == dasd_3990_erp_compound_path) ||
+ (erp->function == dasd_3990_erp_compound_code) ||
+ (erp->function == dasd_3990_erp_compound_config))) {
erp = dasd_3990_erp_compound(erp, sense);
} else {
- /* No retry left and no additional special handling */
- /*necessary */
- DEV_MESSAGE(KERN_ERR, device,
- "no retries left for erp %p - "
- "set status to FAILED", erp);
+ /*
+ * No retry left and no additional special handling
+ * necessary
+ */
+ dev_err(&device->cdev->dev,
+ "ERP %p has run out of retries and failed\n", erp);
erp->status = DASD_CQR_FAILED;
}
@@ -2548,24 +2594,25 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
if (erp->retries > 0) {
- char *sense = erp->refers->irb.ecw;
+ char *sense = dasd_get_sense(&erp->refers->irb);
/* check for special retries */
- if (erp->function == dasd_3990_erp_action_4) {
+ if (sense && erp->function == dasd_3990_erp_action_4) {
erp = dasd_3990_erp_action_4(erp, sense);
- } else if (erp->function == dasd_3990_erp_action_1B_32) {
+ } else if (sense &&
+ erp->function == dasd_3990_erp_action_1B_32) {
erp = dasd_3990_update_1B(erp, sense);
- } else if (erp->function == dasd_3990_erp_int_req) {
+ } else if (sense && erp->function == dasd_3990_erp_int_req) {
erp = dasd_3990_erp_int_req(erp);
} else {
/* simple retry */
- DEV_MESSAGE(KERN_DEBUG, device,
+ DBF_DEV_EVENT(DBF_DEBUG, device,
"%i retries left for erp %p",
erp->retries, erp);
@@ -2609,24 +2656,24 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "ERP chain at BEGINNING of ERP-ACTION");
+ dev_err(&device->cdev->dev,
+ "ERP chain at BEGINNING of ERP-ACTION\n");
for (temp_erp = cqr;
temp_erp != NULL; temp_erp = temp_erp->refers) {
- DEV_MESSAGE(KERN_ERR, device,
- " erp %p (%02x) refers to %p",
+ dev_err(&device->cdev->dev,
+ "ERP %p (%02x) refers to %p\n",
temp_erp, temp_erp->status,
temp_erp->refers);
}
}
/* double-check if current erp/cqr was successful */
- if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
- (cqr->irb.scsw.cmd.dstat ==
+ if ((scsw_cstat(&cqr->irb.scsw) == 0x00) &&
+ (scsw_dstat(&cqr->irb.scsw) ==
(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
- DEV_MESSAGE(KERN_DEBUG, device,
+ DBF_DEV_EVENT(DBF_DEBUG, device,
"ERP called for successful request %p"
" - NO ERP necessary", cqr);
@@ -2648,13 +2695,13 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "ERP chain at END of ERP-ACTION");
+ dev_err(&device->cdev->dev,
+ "ERP chain at END of ERP-ACTION\n");
for (temp_erp = erp;
temp_erp != NULL; temp_erp = temp_erp->refers) {
- DEV_MESSAGE(KERN_ERR, device,
- " erp %p (%02x) refers to %p",
+ dev_err(&device->cdev->dev,
+ "ERP %p (%02x) refers to %p\n",
temp_erp, temp_erp->status,
temp_erp->refers);
}
@@ -2667,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
list_add_tail(&erp->blocklist, &cqr->blocklist);
}
+
+
return erp;
} /* end dasd_3990_erp_action */
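The transport-mode branch added to dasd_3990_erp_add_erp() above can be summarized by the following layout sketch (illustration only, not additional driver code):

	/*
	 * erp->data, datasize = sizeof(struct tcw) + sizeof(struct tsb):
	 *
	 *   +------------------------------+------------------------+
	 *   | struct tcw                   | struct tsb             |
	 *   | shallow copy of the original | fresh status block, so |
	 *   | TCW; still points to the     | a retry cannot clobber |
	 *   | original TCCB, TIDALs, etc.  | the original sense     |
	 *   +------------------------------+------------------------+
	 *     ^ erp->cpaddr                  ^ tcw->tsb
	 */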
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 20676cdef4a5..5b7bbc87593b 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -5,6 +5,8 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
@@ -503,7 +505,7 @@ static void lcu_update_work(struct work_struct *work)
*/
spin_lock_irqsave(&lcu->lock, flags);
if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
- DEV_MESSAGE(KERN_WARNING, device, "could not update"
+ DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
" alias data in lcu (rc = %d), retry later", rc);
schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
} else {
@@ -646,14 +648,16 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
{
struct dasd_ccw_req *cqr;
int rc = 0;
+ struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
strncpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
- cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK;
- cqr->cpaddr->flags = 0 ;
- cqr->cpaddr->count = 16;
- cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+ ccw->flags = 0 ;
+ ccw->count = 16;
+ ccw->cda = (__u32)(addr_t) cqr->data;
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -855,16 +859,25 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
struct alias_lcu *lcu;
char reason;
struct dasd_eckd_private *private;
+ char *sense;
private = (struct dasd_eckd_private *) device->private;
- reason = irb->ecw[8];
- DEV_MESSAGE(KERN_WARNING, device, "%s %x",
- "eckd handle summary unit check: reason", reason);
+ sense = dasd_get_sense(irb);
+ if (sense) {
+ reason = sense[8];
+ DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+ "eckd handle summary unit check: reason", reason);
+ } else {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd handle summary unit check:"
+ " no reason code available");
+ return;
+ }
lcu = private->lcu;
if (!lcu) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device not ready to handle summary"
" unit check (no lcu structure)");
return;
@@ -877,7 +890,7 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
* the next interrupt on a different device
*/
if (list_empty(&device->alias_list)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device is in offline processing,"
" don't do summary unit check handling");
spin_unlock(&lcu->lock);
@@ -885,7 +898,7 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
}
if (lcu->suc_data.device) {
/* already scheduled or running */
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"previous instance of summary unit check worker"
" still pending");
spin_unlock(&lcu->lock);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 34339902efb9..e77666c8e6c0 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -13,6 +13,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -67,6 +69,8 @@ int dasd_probeonly = 0; /* is true, when probeonly mode is active */
int dasd_autodetect = 0; /* is true, when autodetection is active */
int dasd_nopav = 0; /* is true, when PAV is disabled */
EXPORT_SYMBOL_GPL(dasd_nopav);
+int dasd_nofcx; /* disable High Performance FICON */
+EXPORT_SYMBOL_GPL(dasd_nofcx);
/*
* char *dasd[] is intended to hold the ranges supplied by the dasd= statement
@@ -125,6 +129,7 @@ __setup ("dasd=", dasd_call_setup);
* Read a device busid/devno from a string.
*/
static int
+
dasd_busid(char **str, int *id0, int *id1, int *devno)
{
int val, old_style;
@@ -132,8 +137,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
/* Interpret ipldev busid */
if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
if (ipl_info.type != IPL_TYPE_CCW) {
- MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
- "device");
+ pr_err("The IPL device is not a CCW device\n");
return -EINVAL;
}
*id0 = 0;
@@ -209,9 +213,8 @@ dasd_feature_list(char *str, char **endp)
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
- MESSAGE(KERN_WARNING,
- "unsupported feature: %*s, "
- "ignoring setting", len, str);
+ pr_warning("%*s is not a supported device option\n",
+ len, str);
rc = -EINVAL;
}
str += len;
@@ -220,8 +223,8 @@ dasd_feature_list(char *str, char **endp)
str++;
}
if (*str != ')') {
- MESSAGE(KERN_WARNING, "%s",
- "missing ')' in dasd parameter string\n");
+ pr_warning("A closing parenthesis ')' is missing in the "
+ "dasd= parameter\n");
rc = -EINVAL;
} else
str++;
@@ -253,25 +256,29 @@ dasd_parse_keyword( char *parsestring ) {
}
if (strncmp("autodetect", parsestring, length) == 0) {
dasd_autodetect = 1;
- MESSAGE (KERN_INFO, "%s",
- "turning to autodetection mode");
+ pr_info("The autodetection mode has been activated\n");
return residual_str;
}
if (strncmp("probeonly", parsestring, length) == 0) {
dasd_probeonly = 1;
- MESSAGE(KERN_INFO, "%s",
- "turning to probeonly mode");
+ pr_info("The probeonly mode has been activated\n");
return residual_str;
}
if (strncmp("nopav", parsestring, length) == 0) {
if (MACHINE_IS_VM)
- MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM");
+ pr_info("'nopav' is not supported on z/VM\n");
else {
dasd_nopav = 1;
- MESSAGE(KERN_INFO, "%s", "disable PAV mode");
+ pr_info("PAV support has be deactivated\n");
}
return residual_str;
}
+ if (strncmp("nofcx", parsestring, length) == 0) {
+ dasd_nofcx = 1;
+ pr_info("High Performance FICON support has been "
+ "deactivated\n");
+ return residual_str;
+ }
if (strncmp("fixedbuffers", parsestring, length) == 0) {
if (dasd_page_cache)
return residual_str;
@@ -280,10 +287,10 @@ dasd_parse_keyword( char *parsestring ) {
PAGE_SIZE, SLAB_CACHE_DMA,
NULL);
if (!dasd_page_cache)
- MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
+ DBF_EVENT(DBF_WARNING, "%s", "Failed to create slab, "
"fixed buffer mode disabled.");
else
- MESSAGE (KERN_INFO, "%s",
+ DBF_EVENT(DBF_INFO, "%s",
"turning on fixed buffer mode");
return residual_str;
}
@@ -321,7 +328,7 @@ dasd_parse_range( char *parsestring ) {
(from_id0 != to_id0 || from_id1 != to_id1 || from > to))
rc = -EINVAL;
if (rc) {
- MESSAGE(KERN_ERR, "Invalid device range %s", parsestring);
+ pr_err("%s is not a valid device range\n", parsestring);
return ERR_PTR(rc);
}
features = dasd_feature_list(str, &str);
@@ -340,8 +347,8 @@ dasd_parse_range( char *parsestring ) {
return str + 1;
if (*str == '\0')
return str;
- MESSAGE(KERN_WARNING,
- "junk at end of dasd parameter string: %s\n", str);
+ pr_warning("The dasd= parameter value %s has an invalid ending\n",
+ str);
return ERR_PTR(-EINVAL);
}
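Putting the parsing changes above together, a kernel command line that exercises the new keyword could look like the following; the bus IDs are illustrative and failfast is just one of the options accepted by dasd_feature_list():

	dasd=nofcx,nopav,0.0.7000-0.0.70ff(failfast)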
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ef2a56952054..b9a7f7733446 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,6 +8,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -144,8 +146,8 @@ dasd_diag_erp(struct dasd_device *device)
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
if (rc)
- DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, "
- "rc=%d", rc);
+ dev_warn(&device->cdev->dev, "DIAG ERP failed with "
+ "rc=%d\n", rc);
}
/* Start a given request at the device. Return zero on success, non-zero
@@ -160,7 +162,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
device = cqr->startdev;
if (cqr->retries < 0) {
- DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p "
+ DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
"- no retry left)", cqr);
cqr->status = DASD_CQR_ERROR;
return -EIO;
@@ -195,7 +197,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
break;
default: /* Error condition */
cqr->status = DASD_CQR_QUEUED;
- DEV_MESSAGE(KERN_WARNING, device, "dia250 returned rc=%d", rc);
+ DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
dasd_diag_erp(device);
rc = -EIO;
break;
@@ -243,13 +245,14 @@ dasd_ext_handler(__u16 code)
return;
}
if (!ip) { /* no intparm: unsolicited interrupt */
- MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt");
+ DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
+ "interrupt");
return;
}
cqr = (struct dasd_ccw_req *) ip;
device = (struct dasd_device *) cqr->startdev;
if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- DEV_MESSAGE(KERN_WARNING, device,
+ DBF_DEV_EVENT(DBF_WARNING, device,
" magic number of dasd_ccw_req 0x%08X doesn't"
" match discipline 0x%08X",
cqr->magic, *(int *) (&device->discipline->name));
@@ -281,15 +284,11 @@ dasd_ext_handler(__u16 code)
rc = dasd_start_diag(next);
if (rc == 0)
expires = next->expires;
- else if (rc != -EACCES)
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "Interrupt fastpath "
- "failed!");
}
}
} else {
cqr->status = DASD_CQR_QUEUED;
- DEV_MESSAGE(KERN_WARNING, device, "interrupt status for "
+ DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
"request %p was %d (%d retries left)", cqr, status,
cqr->retries);
dasd_diag_erp(device);
@@ -322,8 +321,9 @@ dasd_diag_check_device(struct dasd_device *device)
if (private == NULL) {
private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
if (private == NULL) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "memory allocation failed for private data");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Allocating memory for private DASD data "
+ "failed\n");
return -ENOMEM;
}
ccw_device_get_id(device->cdev, &private->dev_id);
@@ -331,7 +331,7 @@ dasd_diag_check_device(struct dasd_device *device)
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"could not allocate dasd block structure");
device->private = NULL;
kfree(private);
@@ -347,7 +347,7 @@ dasd_diag_check_device(struct dasd_device *device)
rc = diag210((struct diag210 *) rdc_data);
if (rc) {
- DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device "
+ DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
"information (rc=%d)", rc);
rc = -EOPNOTSUPP;
goto out;
@@ -362,8 +362,8 @@ dasd_diag_check_device(struct dasd_device *device)
private->pt_block = 2;
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "unsupported device class "
- "(class=%d)", private->rdc_data.vdev_class);
+ dev_warn(&device->cdev->dev, "Device type %d is not supported "
+ "in DIAG mode\n", private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
@@ -380,7 +380,7 @@ dasd_diag_check_device(struct dasd_device *device)
/* figure out blocksize of device */
label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
if (label == NULL) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to allocate initialization request");
rc = -ENOMEM;
goto out;
@@ -404,8 +404,8 @@ dasd_diag_check_device(struct dasd_device *device)
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "DIAG call failed");
+ dev_warn(&device->cdev->dev,
+ "A 64-bit DIAG call failed\n");
rc = -EOPNOTSUPP;
goto out_label;
}
@@ -414,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
break;
}
if (bsize > PAGE_SIZE) {
- DEV_MESSAGE(KERN_WARNING, device, "device access failed "
- "(rc=%d)", rc);
+ dev_warn(&device->cdev->dev, "Accessing the DASD failed because"
+ " of an incorrect format (rc=%d)\n", rc);
rc = -EIO;
goto out_label;
}
@@ -433,15 +433,15 @@ dasd_diag_check_device(struct dasd_device *device)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc) {
- DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization "
- "failed (rc=%d)", rc);
+ dev_warn(&device->cdev->dev, "DIAG initialization "
+ "failed with rc=%d\n", rc);
rc = -EIO;
} else {
- DEV_MESSAGE(KERN_INFO, device,
- "(%ld B/blk): %ldkB",
- (unsigned long) block->bp_block,
- (unsigned long) (block->blocks <<
- block->s2b_shift) >> 1);
+ dev_info(&device->cdev->dev,
+ "New DASD with %ld byte/block, total size %ld KB\n",
+ (unsigned long) block->bp_block,
+ (unsigned long) (block->blocks <<
+ block->s2b_shift) >> 1);
}
out_label:
free_page((long) label);
@@ -595,7 +595,7 @@ static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *stat)
{
- DEV_MESSAGE(KERN_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"dump sense not available for DIAG data");
}
@@ -621,10 +621,8 @@ static int __init
dasd_diag_init(void)
{
if (!MACHINE_IS_VM) {
- MESSAGE_LOG(KERN_INFO,
- "Machine is not VM: %s "
- "discipline not initializing",
- dasd_diag_discipline.name);
+ pr_info("Discipline %s cannot be used without z/VM\n",
+ dasd_diag_discipline.name);
return -ENODEV;
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bdb87998f364..21254793c604 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -11,6 +11,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,9 +29,12 @@
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
+#include <asm/itcw.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
+#include "../cio/chsc.h"
+
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
@@ -84,7 +89,7 @@ dasd_eckd_probe (struct ccw_device *cdev)
/* set ECKD specific ccw-device options */
ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
if (ret) {
- printk(KERN_WARNING
+ DBF_EVENT(DBF_WARNING,
"dasd_eckd_probe: could not set ccw-device options "
"for %s\n", dev_name(&cdev->dev));
return ret;
@@ -159,6 +164,14 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
return 0;
}
+static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
+{
+ geo->cyl = (__u16) cyl;
+ geo->head = cyl >> 16;
+ geo->head <<= 4;
+ geo->head |= head;
+}
+
static int
check_XRC (struct ccw1 *de_ccw,
struct DE_eckd_data *data,
@@ -186,11 +199,12 @@ check_XRC (struct ccw1 *de_ccw,
}
static int
-define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
- int totrk, int cmd, struct dasd_device * device)
+define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
+ unsigned int totrk, int cmd, struct dasd_device *device)
{
struct dasd_eckd_private *private;
- struct ch_t geo, beg, end;
+ u32 begcyl, endcyl;
+ u16 heads, beghead, endhead;
int rc = 0;
private = (struct dasd_eckd_private *) device->private;
@@ -236,7 +250,8 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
rc = check_XRC (ccw, data, device);
break;
default:
- DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
+ dev_err(&device->cdev->dev,
+ "0x%x is not a known command\n", cmd);
break;
}
@@ -248,27 +263,24 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
&& !(private->uses_cdl && trk < 2))
data->ga_extended |= 0x40; /* Regular Data Format Mode */
- geo.cyl = private->rdc_data.no_cyl;
- geo.head = private->rdc_data.trk_per_cyl;
- beg.cyl = trk / geo.head;
- beg.head = trk % geo.head;
- end.cyl = totrk / geo.head;
- end.head = totrk % geo.head;
+ heads = private->rdc_data.trk_per_cyl;
+ begcyl = trk / heads;
+ beghead = trk % heads;
+ endcyl = totrk / heads;
+ endhead = totrk % heads;
/* check for sequential prestage - enhance cylinder range */
if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
data->attributes.operation == DASD_SEQ_ACCESS) {
- if (end.cyl + private->attrib.nr_cyl < geo.cyl)
- end.cyl += private->attrib.nr_cyl;
+ if (endcyl + private->attrib.nr_cyl < private->real_cyl)
+ endcyl += private->attrib.nr_cyl;
else
- end.cyl = (geo.cyl - 1);
+ endcyl = (private->real_cyl - 1);
}
- data->beg_ext.cyl = beg.cyl;
- data->beg_ext.head = beg.head;
- data->end_ext.cyl = end.cyl;
- data->end_ext.head = end.head;
+ set_ch_t(&data->beg_ext, begcyl, beghead);
+ set_ch_t(&data->end_ext, endcyl, endhead);
return rc;
}
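For orientation, the new set_ch_t() helper used above packs cylinder values beyond 16 bits into the head field; a worked example with arbitrarily chosen values:

	/* cyl = 0x12345, head = 7:
	 *   geo->cyl  = (__u16) 0x12345          -> 0x2345
	 *   geo->head = (0x12345 >> 16) << 4 | 7 -> 0x17
	 * The real head number keeps the low four bits; the cylinder
	 * bits above 65535 are carried in the remaining head bits.
	 */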
@@ -283,29 +295,145 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
return 0;
/* switch on System Time Stamp - needed for XRC Support */
- pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid' */
- pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */
+ pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid' */
+ pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
- rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time);
+ rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
/* Ignore return code if sync clock is switched off. */
if (rc == -ENOSYS || rc == -EACCES)
rc = 0;
return rc;
}
-static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
- int totrk, int cmd, struct dasd_device *basedev,
- struct dasd_device *startdev)
+static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
+ unsigned int rec_on_trk, int count, int cmd,
+ struct dasd_device *device, unsigned int reclen,
+ unsigned int tlf)
+{
+ struct dasd_eckd_private *private;
+ int sector;
+ int dn, d;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ memset(data, 0, sizeof(*data));
+ sector = 0;
+ if (rec_on_trk) {
+ switch (private->rdc_data.dev_type) {
+ case 0x3390:
+ dn = ceil_quot(reclen + 6, 232);
+ d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+ sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+ break;
+ case 0x3380:
+ d = 7 + ceil_quot(reclen + 12, 32);
+ sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+ break;
+ }
+ }
+ data->sector = sector;
+ /* note: meaning of count depends on the operation
+ * for record based I/O it's the number of records, but for
+ * track based I/O it's the number of tracks
+ */
+ data->count = count;
+ switch (cmd) {
+ case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x03;
+ break;
+ case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x16;
+ break;
+ case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+ data->operation.orientation = 0x1;
+ data->operation.operation = 0x03;
+ data->count++;
+ break;
+ case DASD_ECKD_CCW_READ_RECORD_ZERO:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x16;
+ data->count++;
+ break;
+ case DASD_ECKD_CCW_WRITE:
+ case DASD_ECKD_CCW_WRITE_MT:
+ case DASD_ECKD_CCW_WRITE_KD:
+ case DASD_ECKD_CCW_WRITE_KD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x01;
+ break;
+ case DASD_ECKD_CCW_WRITE_CKD:
+ case DASD_ECKD_CCW_WRITE_CKD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x03;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen; /* not tlf, as one might think */
+ data->operation.operation = 0x3F;
+ data->extended_operation = 0x23;
+ break;
+ case DASD_ECKD_CCW_READ:
+ case DASD_ECKD_CCW_READ_MT:
+ case DASD_ECKD_CCW_READ_KD:
+ case DASD_ECKD_CCW_READ_KD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x06;
+ break;
+ case DASD_ECKD_CCW_READ_CKD:
+ case DASD_ECKD_CCW_READ_CKD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x16;
+ break;
+ case DASD_ECKD_CCW_READ_COUNT:
+ data->operation.operation = 0x06;
+ break;
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ data->auxiliary.length_valid = 0x1;
+ data->length = tlf;
+ data->operation.operation = 0x0C;
+ break;
+ case DASD_ECKD_CCW_ERASE:
+ data->length = reclen;
+ data->auxiliary.length_valid = 0x1;
+ data->operation.operation = 0x0b;
+ break;
+ default:
+ DBF_DEV_EVENT(DBF_ERR, device,
+ "fill LRE unknown opcode 0x%x", cmd);
+ BUG();
+ }
+ set_ch_t(&data->seek_addr,
+ trk / private->rdc_data.trk_per_cyl,
+ trk % private->rdc_data.trk_per_cyl);
+ data->search_arg.cyl = data->seek_addr.cyl;
+ data->search_arg.head = data->seek_addr.head;
+ data->search_arg.record = rec_on_trk;
+}
+
+static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+ unsigned int trk, unsigned int totrk, int cmd,
+ struct dasd_device *basedev, struct dasd_device *startdev,
+ unsigned char format, unsigned int rec_on_trk, int count,
+ unsigned int blksize, unsigned int tlf)
{
struct dasd_eckd_private *basepriv, *startpriv;
- struct DE_eckd_data *data;
- struct ch_t geo, beg, end;
+ struct DE_eckd_data *dedata;
+ struct LRE_eckd_data *lredata;
+ u32 begcyl, endcyl;
+ u16 heads, beghead, endhead;
int rc = 0;
basepriv = (struct dasd_eckd_private *) basedev->private;
startpriv = (struct dasd_eckd_private *) startdev->private;
- data = &pfxdata->define_extend;
+ dedata = &pfxdata->define_extent;
+ lredata = &pfxdata->locate_record;
ccw->cmd_code = DASD_ECKD_CCW_PFX;
ccw->flags = 0;
@@ -314,10 +442,16 @@ static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
memset(pfxdata, 0, sizeof(*pfxdata));
/* prefix data */
- pfxdata->format = 0;
+ if (format > 1) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "PFX LRE unknown format 0x%x", format);
+ BUG();
+ return -EINVAL;
+ }
+ pfxdata->format = format;
pfxdata->base_address = basepriv->ned->unit_addr;
pfxdata->base_lss = basepriv->ned->ID;
- pfxdata->validity.define_extend = 1;
+ pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
if (startpriv->uid.type != UA_BASE_DEVICE) {
@@ -337,70 +471,94 @@ static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
case DASD_ECKD_CCW_READ_COUNT:
- data->mask.perm = 0x1;
- data->attributes.operation = basepriv->attrib.operation;
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ break;
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ dedata->blk_size = 0;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
- data->mask.perm = 0x02;
- data->attributes.operation = basepriv->attrib.operation;
+ dedata->mask.perm = 0x02;
+ dedata->attributes.operation = basepriv->attrib.operation;
rc = check_XRC_on_prefix(pfxdata, basedev);
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
- data->attributes.operation = DASD_BYPASS_CACHE;
+ dedata->attributes.operation = DASD_BYPASS_CACHE;
rc = check_XRC_on_prefix(pfxdata, basedev);
break;
case DASD_ECKD_CCW_ERASE:
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
- data->mask.perm = 0x3;
- data->mask.auth = 0x1;
- data->attributes.operation = DASD_BYPASS_CACHE;
+ dedata->mask.perm = 0x3;
+ dedata->mask.auth = 0x1;
+ dedata->attributes.operation = DASD_BYPASS_CACHE;
rc = check_XRC_on_prefix(pfxdata, basedev);
break;
- default:
- DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd);
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ dedata->mask.perm = 0x02;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ dedata->blk_size = blksize;
+ rc = check_XRC_on_prefix(pfxdata, basedev);
break;
+ default:
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "PFX LRE unknown opcode 0x%x", cmd);
+ BUG();
+ return -EINVAL;
}
- data->attributes.mode = 0x3; /* ECKD */
+ dedata->attributes.mode = 0x3; /* ECKD */
if ((basepriv->rdc_data.cu_type == 0x2105 ||
basepriv->rdc_data.cu_type == 0x2107 ||
basepriv->rdc_data.cu_type == 0x1750)
&& !(basepriv->uses_cdl && trk < 2))
- data->ga_extended |= 0x40; /* Regular Data Format Mode */
+ dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
- geo.cyl = basepriv->rdc_data.no_cyl;
- geo.head = basepriv->rdc_data.trk_per_cyl;
- beg.cyl = trk / geo.head;
- beg.head = trk % geo.head;
- end.cyl = totrk / geo.head;
- end.head = totrk % geo.head;
+ heads = basepriv->rdc_data.trk_per_cyl;
+ begcyl = trk / heads;
+ beghead = trk % heads;
+ endcyl = totrk / heads;
+ endhead = totrk % heads;
/* check for sequential prestage - enhance cylinder range */
- if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
- data->attributes.operation == DASD_SEQ_ACCESS) {
+ if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
+ dedata->attributes.operation == DASD_SEQ_ACCESS) {
- if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl)
- end.cyl += basepriv->attrib.nr_cyl;
+ if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
+ endcyl += basepriv->attrib.nr_cyl;
else
- end.cyl = (geo.cyl - 1);
+ endcyl = (basepriv->real_cyl - 1);
+ }
+
+ set_ch_t(&dedata->beg_ext, begcyl, beghead);
+ set_ch_t(&dedata->end_ext, endcyl, endhead);
+
+ if (format == 1) {
+ fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
+ basedev, blksize, tlf);
}
- data->beg_ext.cyl = beg.cyl;
- data->beg_ext.head = beg.head;
- data->end_ext.cyl = end.cyl;
- data->end_ext.head = end.head;
return rc;
}
+static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+ unsigned int trk, unsigned int totrk, int cmd,
+ struct dasd_device *basedev, struct dasd_device *startdev)
+{
+ return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
+ 0, 0, 0, 0, 0);
+}
+
static void
-locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
- int rec_on_trk, int no_rec, int cmd,
+locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
+ unsigned int rec_on_trk, int no_rec, int cmd,
struct dasd_device * device, int reclen)
{
struct dasd_eckd_private *private;
@@ -491,12 +649,14 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
data->operation.operation = 0x0b;
break;
default:
- DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
- }
- data->seek_addr.cyl = data->search_arg.cyl =
- trk / private->rdc_data.trk_per_cyl;
- data->seek_addr.head = data->search_arg.head =
- trk % private->rdc_data.trk_per_cyl;
+ DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
+ "opcode 0x%x", cmd);
+ }
+ set_ch_t(&data->seek_addr,
+ trk / private->rdc_data.trk_per_cyl,
+ trk % private->rdc_data.trk_per_cyl);
+ data->search_arg.cyl = data->seek_addr.cyl;
+ data->search_arg.head = data->seek_addr.head;
data->search_arg.record = rec_on_trk;
}
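To make the 3390 branch of the sector calculation in fill_LRE_data() above concrete, here is a worked evaluation for an assumed 4096 byte record, second record on the track (ceil_quot() is the round-up division helper the driver already uses):

	/* reclen = 4096, rec_on_trk = 2, dev_type 0x3390:
	 *   dn     = ceil_quot(4096 + 6, 232)         = 18
	 *   d      = 9 + ceil_quot(4096 + 6 * 19, 34) = 133
	 *   sector = (49 + (2 - 1) * (10 + 133)) / 8  = 24
	 */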
@@ -585,8 +745,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "Could not allocate RCD request");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate RCD request");
return cqr;
}
@@ -736,14 +896,16 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
rc = dasd_eckd_read_conf_lpm(device, &conf_data,
&conf_len, lpm);
if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- MESSAGE(KERN_WARNING,
- "Read configuration data returned "
- "error %d", rc);
+ DBF_EVENT(DBF_WARNING,
+ "Read configuration data returned "
+ "error %d for device: %s", rc,
+ dev_name(&device->cdev->dev));
return rc;
}
if (conf_data == NULL) {
- MESSAGE(KERN_WARNING, "%s", "No configuration "
- "data retrieved");
+ DBF_EVENT(DBF_WARNING, "No configuration "
+ "data retrieved for device: %s",
+ dev_name(&device->cdev->dev));
continue; /* no error */
}
/* save first valid configuration data */
@@ -790,8 +952,9 @@ static int dasd_eckd_read_features(struct dasd_device *device)
sizeof(struct dasd_rssd_features)),
device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "Could not allocate initialization request");
+ DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
+ "request for device: %s",
+ dev_name(&device->cdev->dev));
return PTR_ERR(cqr);
}
cqr->startdev = device;
@@ -840,7 +1003,8 @@ static int dasd_eckd_read_features(struct dasd_device *device)
/*
* Build CP for Perform Subsystem Function - SSC.
*/
-static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
+static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
+ int enable_pav)
{
struct dasd_ccw_req *cqr;
struct dasd_psf_ssc_data *psf_ssc_data;
@@ -851,15 +1015,17 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate PSF-SSC request");
return cqr;
}
psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
psf_ssc_data->order = PSF_ORDER_SSC;
- psf_ssc_data->suborder = 0x88;
- psf_ssc_data->reserved[0] = 0x88;
-
+ psf_ssc_data->suborder = 0x40;
+ if (enable_pav) {
+ psf_ssc_data->suborder |= 0x88;
+ psf_ssc_data->reserved[0] = 0x88;
+ }
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)(addr_t)psf_ssc_data;
@@ -880,12 +1046,12 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
* call might change behaviour of DASD devices.
*/
static int
-dasd_eckd_psf_ssc(struct dasd_device *device)
+dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
{
struct dasd_ccw_req *cqr;
int rc;
- cqr = dasd_eckd_build_psf_ssc(device);
+ cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
@@ -904,19 +1070,20 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
{
int rc;
struct dasd_eckd_private *private;
+ int enable_pav;
- /* Currently PAV is the only reason to 'validate' server on LPAR */
if (dasd_nopav || MACHINE_IS_VM)
- return 0;
-
- rc = dasd_eckd_psf_ssc(device);
+ enable_pav = 0;
+ else
+ enable_pav = 1;
+ rc = dasd_eckd_psf_ssc(device, enable_pav);
/* maybe the requested feature is not available on the server,
* therefore just report error and go ahead */
private = (struct dasd_eckd_private *) device->private;
- DEV_MESSAGE(KERN_INFO, device,
- "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
- private->uid.vendor, private->uid.serial,
- private->uid.ssid, rc);
+ DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
+ "returned rc=%d for device: %s",
+ private->uid.vendor, private->uid.serial,
+ private->uid.ssid, rc, dev_name(&device->cdev->dev));
/* RE-Read Configuration Data */
return dasd_eckd_read_conf(device);
}
@@ -938,9 +1105,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private = kzalloc(sizeof(struct dasd_eckd_private),
GFP_KERNEL | GFP_DMA);
if (private == NULL) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "memory allocation failed for private "
- "data");
+ dev_warn(&device->cdev->dev,
+ "Allocating memory for private DASD data "
+ "failed\n");
return -ENOMEM;
}
device->private = (void *) private;
@@ -965,8 +1132,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (private->uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "could not allocate dasd block structure");
+ DBF_EVENT(DBF_WARNING, "could not allocate dasd "
+ "block structure for device: %s",
+ dev_name(&device->cdev->dev));
rc = PTR_ERR(block);
goto out_err1;
}
@@ -997,20 +1165,27 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
memset(rdc_data, 0, sizeof(rdc_data));
rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
if (rc) {
- DEV_MESSAGE(KERN_WARNING, device,
- "Read device characteristics returned "
- "rc=%d", rc);
+ DBF_EVENT(DBF_WARNING,
+ "Read device characteristics failed, rc=%d for "
+ "device: %s", rc, dev_name(&device->cdev->dev));
goto out_err3;
}
- DEV_MESSAGE(KERN_INFO, device,
- "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
- private->rdc_data.dev_type,
- private->rdc_data.dev_model,
- private->rdc_data.cu_type,
- private->rdc_data.cu_model.model,
- private->rdc_data.no_cyl,
- private->rdc_data.trk_per_cyl,
- private->rdc_data.sec_per_trk);
+ /* find the valid cylinder size */
+ if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
+ private->rdc_data.long_no_cyl)
+ private->real_cyl = private->rdc_data.long_no_cyl;
+ else
+ private->real_cyl = private->rdc_data.no_cyl;
+
+ dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
+ "with %d cylinders, %d heads, %d sectors\n",
+ private->rdc_data.dev_type,
+ private->rdc_data.dev_model,
+ private->rdc_data.cu_type,
+ private->rdc_data.cu_model.model,
+ private->real_cyl,
+ private->rdc_data.trk_per_cyl,
+ private->rdc_data.sec_per_trk);
return 0;
out_err3:
@@ -1151,14 +1326,12 @@ dasd_eckd_end_analysis(struct dasd_block *block)
status = private->init_cqr_status;
private->init_cqr_status = -1;
if (status != DASD_CQR_DONE) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "volume analysis returned unformatted disk");
+ dev_warn(&device->cdev->dev,
+ "The DASD is not formatted\n");
return -EMEDIUMTYPE;
}
private->uses_cdl = 1;
- /* Calculate number of blocks/records per track. */
- blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
/* Check Track 0 for Compatible Disk Layout */
count_area = NULL;
for (i = 0; i < 3; i++) {
@@ -1182,8 +1355,8 @@ dasd_eckd_end_analysis(struct dasd_block *block)
count_area = &private->count_area[0];
} else {
if (private->count_area[3].record == 1)
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "Trk 0: no records after VTOC!");
+ dev_warn(&device->cdev->dev,
+ "Track 0 has no records following the VTOC\n");
}
if (count_area != NULL && count_area->kl == 0) {
/* we found nothing violating our disk layout */
@@ -1191,8 +1364,8 @@ dasd_eckd_end_analysis(struct dasd_block *block)
block->bp_block = count_area->dl;
}
if (block->bp_block == 0) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "Volume has incompatible disk layout");
+ dev_warn(&device->cdev->dev,
+ "The disk layout of the DASD is not supported\n");
return -EMEDIUMTYPE;
}
block->s2b_shift = 0; /* bits to shift 512 to get a block */
@@ -1200,19 +1373,19 @@ dasd_eckd_end_analysis(struct dasd_block *block)
block->s2b_shift++;
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
- block->blocks = (private->rdc_data.no_cyl *
+ block->blocks = (private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
- DEV_MESSAGE(KERN_INFO, device,
- "(%dkB blks): %dkB at %dkB/trk %s",
- (block->bp_block >> 10),
- ((private->rdc_data.no_cyl *
- private->rdc_data.trk_per_cyl *
- blk_per_trk * (block->bp_block >> 9)) >> 1),
- ((blk_per_trk * block->bp_block) >> 10),
- private->uses_cdl ?
- "compatible disk layout" : "linux disk layout");
+ dev_info(&device->cdev->dev,
+ "DASD with %d KB/block, %d KB total size, %d KB/track, "
+ "%s\n", (block->bp_block >> 10),
+ ((private->real_cyl *
+ private->rdc_data.trk_per_cyl *
+ blk_per_trk * (block->bp_block >> 9)) >> 1),
+ ((blk_per_trk * block->bp_block) >> 10),
+ private->uses_cdl ?
+ "compatible disk layout" : "linux disk layout");
return 0;
}
@@ -1262,31 +1435,35 @@ dasd_eckd_format_device(struct dasd_device * device,
struct eckd_count *ect;
struct ccw1 *ccw;
void *data;
- int rpt, cyl, head;
+ int rpt;
+ struct ch_t address;
int cplength, datasize;
int i;
+ int intensity = 0;
+ int r0_perm;
private = (struct dasd_eckd_private *) device->private;
rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
- cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
- head = fdata->start_unit % private->rdc_data.trk_per_cyl;
+ set_ch_t(&address,
+ fdata->start_unit / private->rdc_data.trk_per_cyl,
+ fdata->start_unit % private->rdc_data.trk_per_cyl);
/* Sanity checks. */
if (fdata->start_unit >=
- (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
- DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
- fdata->start_unit);
+ (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+ dev_warn(&device->cdev->dev, "Start track number %d used in "
+ "formatting is too big\n", fdata->start_unit);
return ERR_PTR(-EINVAL);
}
if (fdata->start_unit > fdata->stop_unit) {
- DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
- fdata->start_unit);
+ dev_warn(&device->cdev->dev, "Start track %d used in "
+ "formatting exceeds end track\n", fdata->start_unit);
return ERR_PTR(-EINVAL);
}
if (dasd_check_blocksize(fdata->blksize) != 0) {
- DEV_MESSAGE(KERN_WARNING, device,
- "Invalid blocksize %d...terminating!",
- fdata->blksize);
+ dev_warn(&device->cdev->dev,
+ "The DASD cannot be formatted with block size %d\n",
+ fdata->blksize);
return ERR_PTR(-EINVAL);
}
@@ -1296,9 +1473,17 @@ dasd_eckd_format_device(struct dasd_device * device,
* Bit 1: write home address, currently not supported
* Bit 2: invalidate tracks
* Bit 3: use OS/390 compatible disk layout (cdl)
+ * Bit 4: do not allow storage subsystem to modify record zero
* Only some bit combinations do make sense.
*/
- switch (fdata->intensity) {
+ if (fdata->intensity & 0x10) {
+ r0_perm = 0;
+ intensity = fdata->intensity & ~0x10;
+ } else {
+ r0_perm = 1;
+ intensity = fdata->intensity;
+ }
+ switch (intensity) {
case 0x00: /* Normal format */
case 0x08: /* Normal format, use cdl. */
cplength = 2 + rpt;
@@ -1322,8 +1507,8 @@ dasd_eckd_format_device(struct dasd_device * device,
sizeof(struct eckd_count);
break;
default:
- DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
- fdata->intensity);
+ dev_warn(&device->cdev->dev, "An I/O control call used "
+ "incorrect flags 0x%x\n", fdata->intensity);
return ERR_PTR(-EINVAL);
}
/* Allocate the format ccw request. */
@@ -1335,11 +1520,14 @@ dasd_eckd_format_device(struct dasd_device * device,
data = fcp->data;
ccw = fcp->cpaddr;
- switch (fdata->intensity & ~0x08) {
+ switch (intensity & ~0x08) {
case 0x00: /* Normal format. */
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->start_unit,
DASD_ECKD_CCW_WRITE_CKD, device);
+ /* grant subsystem permission to format R0 */
+ if (r0_perm)
+ ((struct DE_eckd_data *)data)->ga_extended |= 0x04;
data += sizeof(struct DE_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
@@ -1373,11 +1561,11 @@ dasd_eckd_format_device(struct dasd_device * device,
data += sizeof(struct LO_eckd_data);
break;
}
- if (fdata->intensity & 0x01) { /* write record zero */
+ if (intensity & 0x01) { /* write record zero */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
- ect->cyl = cyl;
- ect->head = head;
+ ect->cyl = address.cyl;
+ ect->head = address.head;
ect->record = 0;
ect->kl = 0;
ect->dl = 8;
@@ -1388,11 +1576,11 @@ dasd_eckd_format_device(struct dasd_device * device,
ccw->cda = (__u32)(addr_t) ect;
ccw++;
}
- if ((fdata->intensity & ~0x08) & 0x04) { /* erase track */
+ if ((intensity & ~0x08) & 0x04) { /* erase track */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
- ect->cyl = cyl;
- ect->head = head;
+ ect->cyl = address.cyl;
+ ect->head = address.head;
ect->record = 1;
ect->kl = 0;
ect->dl = 0;
@@ -1405,20 +1593,20 @@ dasd_eckd_format_device(struct dasd_device * device,
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
- ect->cyl = cyl;
- ect->head = head;
+ ect->cyl = address.cyl;
+ ect->head = address.head;
ect->record = i + 1;
ect->kl = 0;
ect->dl = fdata->blksize;
/* Check for special tracks 0-1 when formatting CDL */
- if ((fdata->intensity & 0x08) &&
+ if ((intensity & 0x08) &&
fdata->start_unit == 0) {
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
}
}
- if ((fdata->intensity & 0x08) &&
+ if ((intensity & 0x08) &&
fdata->start_unit == 1) {
ect->kl = 44;
ect->dl = LABEL_SIZE - 44;
@@ -1479,57 +1667,69 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
struct irb *irb)
{
char mask;
+ char *sense = NULL;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
- if ((irb->scsw.cmd.dstat & mask) == mask) {
+ if ((scsw_dstat(&irb->scsw) & mask) == mask) {
dasd_generic_handle_state_change(device);
return;
}
/* summary unit check */
- if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[7] == 0x0D)) {
dasd_alias_handle_summary_unit_check(device, irb);
return;
}
-
+ sense = dasd_get_sense(irb);
/* service information message SIM */
- if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) &&
- ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
- dasd_3990_erp_handle_sim(device, irb->ecw);
+ if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
+ ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
+ dasd_3990_erp_handle_sim(device, sense);
dasd_schedule_device_bh(device);
return;
}
- if ((irb->scsw.cmd.cc == 1) &&
- (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (irb->scsw.cmd.actl & SCSW_ACTL_START_PEND) &&
- (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND)) {
+ if ((scsw_cc(&irb->scsw) == 1) &&
+ (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
+ (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
+ (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
/* fake irb do nothing, they are handled elsewhere */
dasd_schedule_device_bh(device);
return;
}
- if (!(irb->esw.esw0.erw.cons)) {
+ if (!sense) {
/* just report other unsolicited interrupts */
- DEV_MESSAGE(KERN_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
"unsolicited interrupt received");
} else {
- DEV_MESSAGE(KERN_ERR, device, "%s",
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
"unsolicited interrupt received "
"(sense available)");
- device->discipline->dump_sense(device, NULL, irb);
+ device->discipline->dump_sense_dbf(device, NULL, irb,
+ "unsolicited");
}
dasd_schedule_device_bh(device);
return;
};
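
dasd_get_sense() is new with this series; only its callers appear in the hunk above, and its prototype is added to dasd_int.h further down. A minimal sketch of the behaviour those callers rely on (command-mode IRBs carry sense in irb->ecw when esw0.erw.cons is set, transport-mode IRBs carry it in the TSB), mirroring the logic of dasd_eckd_dump_sense_dbf() later in this patch; the helper below is an approximation, not the actual dasd.c implementation:

/* Sketch only: roughly needs <asm/cio.h>, <asm/scsw.h> and <asm/fcx.h>
 * for struct irb, scsw_is_tm(), struct tcw/tsb and tcw_get_tsb().
 */
static char *dasd_get_sense_sketch(struct irb *irb)
{
	struct tsb *tsb = NULL;

	if (scsw_is_tm(&irb->scsw) && irb->scsw.tm.fcxs == 0x01) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)
					  (unsigned long) irb->scsw.tm.tcw);
		if (!tsb)
			return NULL;
		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			return (char *) tsb->tsa.iostat.sense;
		case 2:	/* tsa_ddpc */
			return (char *) tsb->tsa.ddpc.sense;
		default:	/* tsa_intrg or unknown: no sense bytes */
			return NULL;
		}
	}
	if (irb->esw.esw0.erw.cons)
		return (char *) irb->ecw;
	return NULL;
}
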
-static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+ struct dasd_device *startdev,
struct dasd_block *block,
- struct request *req)
+ struct request *req,
+ sector_t first_rec,
+ sector_t last_rec,
+ sector_t first_trk,
+ sector_t last_trk,
+ unsigned int first_offs,
+ unsigned int last_offs,
+ unsigned int blk_per_trk,
+ unsigned int blksize)
{
struct dasd_eckd_private *private;
unsigned long *idaws;
@@ -1539,11 +1739,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
struct req_iterator iter;
struct bio_vec *bv;
char *dst;
- unsigned int blksize, blk_per_trk, off;
+ unsigned int off;
int count, cidaw, cplength, datasize;
- sector_t recid, first_rec, last_rec;
- sector_t first_trk, last_trk;
- unsigned int first_offs, last_offs;
+ sector_t recid;
unsigned char cmd, rcmd;
int use_prefix;
struct dasd_device *basedev;
@@ -1556,15 +1754,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
cmd = DASD_ECKD_CCW_WRITE_MT;
else
return ERR_PTR(-EINVAL);
- /* Calculate number of blocks/records per track. */
- blksize = block->bp_block;
- blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
- /* Calculate record id of first and last block. */
- first_rec = first_trk = req->sector >> block->s2b_shift;
- first_offs = sector_div(first_trk, blk_per_trk);
- last_rec = last_trk =
- (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
- last_offs = sector_div(last_trk, blk_per_trk);
+
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
@@ -1714,6 +1904,497 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
return cqr;
}
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
+ struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req,
+ sector_t first_rec,
+ sector_t last_rec,
+ sector_t first_trk,
+ sector_t last_trk,
+ unsigned int first_offs,
+ unsigned int last_offs,
+ unsigned int blk_per_trk,
+ unsigned int blksize)
+{
+ struct dasd_eckd_private *private;
+ unsigned long *idaws;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ struct req_iterator iter;
+ struct bio_vec *bv;
+ char *dst, *idaw_dst;
+ unsigned int cidaw, cplength, datasize;
+ unsigned int tlf;
+ sector_t recid;
+ unsigned char cmd;
+ struct dasd_device *basedev;
+ unsigned int trkcount, count, count_to_trk_end;
+ unsigned int idaw_len, seg_len, part_len, len_to_track_end;
+ unsigned char new_track, end_idaw;
+ sector_t trkid;
+ unsigned int recoffs;
+
+ basedev = block->base;
+ private = (struct dasd_eckd_private *) basedev->private;
+ if (rq_data_dir(req) == READ)
+ cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+ else if (rq_data_dir(req) == WRITE)
+ cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Track based I/O needs IDAWs for each page, and not just for
+ * 64 bit addresses. We need additional idals for pages
+ * that get filled from two tracks, so we use the number
+ * of records as upper limit.
+ */
+ cidaw = last_rec - first_rec + 1;
+ trkcount = last_trk - first_trk + 1;
+
+ /* 1x prefix + one read/write ccw per track */
+ cplength = 1 + trkcount;
+
+ /* on 31-bit we need space for two 32 bit addresses per page
+ * on 64-bit one 64 bit address
+ */
+ datasize = sizeof(struct PFX_eckd_data) +
+ cidaw * sizeof(unsigned long long);
+
+ /* Allocate the ccw request. */
+ cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+ cplength, datasize, startdev);
+ if (IS_ERR(cqr))
+ return cqr;
+ ccw = cqr->cpaddr;
+ /* transfer length factor: how many bytes to read from the last track */
+ if (first_trk == last_trk)
+ tlf = last_offs - first_offs + 1;
+ else
+ tlf = last_offs + 1;
+ tlf *= blksize;
+
+ if (prefix_LRE(ccw++, cqr->data, first_trk,
+ last_trk, cmd, basedev, startdev,
+ 1 /* format */, first_offs + 1,
+ trkcount, blksize,
+ tlf) == -EAGAIN) {
+ /* Clock not in sync and XRC is enabled.
+ * Try again later.
+ */
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ /*
+ * The translation of request into ccw programs must meet the
+ * following conditions:
+ * - all idaws but the first and the last must address full pages
+ * (or 2K blocks on 31-bit)
+ * - the scope of a ccw and its idal ends with the track boundaries
+ */
+ idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+ recid = first_rec;
+ new_track = 1;
+ end_idaw = 0;
+ len_to_track_end = 0;
+ idaw_dst = 0;
+ idaw_len = 0;
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ seg_len = bv->bv_len;
+ while (seg_len) {
+ if (new_track) {
+ trkid = recid;
+ recoffs = sector_div(trkid, blk_per_trk);
+ count_to_trk_end = blk_per_trk - recoffs;
+ count = min((last_rec - recid + 1),
+ (sector_t)count_to_trk_end);
+ len_to_track_end = count * blksize;
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = cmd;
+ ccw->count = len_to_track_end;
+ ccw->cda = (__u32)(addr_t)idaws;
+ ccw->flags = CCW_FLAG_IDA;
+ ccw++;
+ recid += count;
+ new_track = 0;
+ }
+ /* If we start a new idaw, everything is fine and the
+ * start of the new idaw is the start of this segment.
+ * If we continue an idaw, we must make sure that the
+ * current segment begins where the so far accumulated
+ * idaw ends
+ */
+ if (!idaw_dst)
+ idaw_dst = dst;
+ if ((idaw_dst + idaw_len) != dst) {
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-ERANGE);
+ }
+ part_len = min(seg_len, len_to_track_end);
+ seg_len -= part_len;
+ dst += part_len;
+ idaw_len += part_len;
+ len_to_track_end -= part_len;
+ /* collected memory area ends on an IDA_BLOCK border,
+ * -> create an idaw
+ * idal_create_words will handle cases where idaw_len
+ * is larger than IDA_BLOCK_SIZE
+ */
+ if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
+ end_idaw = 1;
+ /* We also need to end the idaw at track end */
+ if (!len_to_track_end) {
+ new_track = 1;
+ end_idaw = 1;
+ }
+ if (end_idaw) {
+ idaws = idal_create_words(idaws, idaw_dst,
+ idaw_len);
+ idaw_dst = 0;
+ idaw_len = 0;
+ end_idaw = 0;
+ }
+ }
+ }
+
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->lpm = private->path_data.ppm;
+ cqr->retries = 256;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
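
The transfer length factor above is plain block arithmetic: a single-track request covers the blocks between the two offsets, while a multi-track request transfers last_offs + 1 blocks on the final track. A small stand-alone sketch with made-up numbers (not part of the patch) that mirrors the rule:

#include <stdio.h>

/* Mirrors the tlf rule in dasd_eckd_build_cp_cmd_track(): only the data
 * that actually lives on the last track counts.
 */
static unsigned int tlf(unsigned int first_trk, unsigned int last_trk,
			unsigned int first_offs, unsigned int last_offs,
			unsigned int blksize)
{
	unsigned int blocks;

	if (first_trk == last_trk)
		blocks = last_offs - first_offs + 1;
	else
		blocks = last_offs + 1;
	return blocks * blksize;
}

int main(void)
{
	/* Example: 4096-byte blocks, request ends at block offset 2 of
	 * the last track of a multi-track request: 3 * 4096 = 12288.
	 */
	printf("tlf = %u\n", tlf(10, 12, 5, 2, 4096));
	return 0;
}
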
+
+static int prepare_itcw(struct itcw *itcw,
+ unsigned int trk, unsigned int totrk, int cmd,
+ struct dasd_device *basedev,
+ struct dasd_device *startdev,
+ unsigned int rec_on_trk, int count,
+ unsigned int blksize,
+ unsigned int total_data_size,
+ unsigned int tlf,
+ unsigned int blk_per_trk)
+{
+ struct PFX_eckd_data pfxdata;
+ struct dasd_eckd_private *basepriv, *startpriv;
+ struct DE_eckd_data *dedata;
+ struct LRE_eckd_data *lredata;
+ struct dcw *dcw;
+
+ u32 begcyl, endcyl;
+ u16 heads, beghead, endhead;
+ u8 pfx_cmd;
+
+ int rc = 0;
+ int sector = 0;
+ int dn, d;
+
+
+ /* setup prefix data */
+ basepriv = (struct dasd_eckd_private *) basedev->private;
+ startpriv = (struct dasd_eckd_private *) startdev->private;
+ dedata = &pfxdata.define_extent;
+ lredata = &pfxdata.locate_record;
+
+ memset(&pfxdata, 0, sizeof(pfxdata));
+ pfxdata.format = 1; /* PFX with LRE */
+ pfxdata.base_address = basepriv->ned->unit_addr;
+ pfxdata.base_lss = basepriv->ned->ID;
+ pfxdata.validity.define_extent = 1;
+
+ /* private uid is kept up to date, conf_data may be outdated */
+ if (startpriv->uid.type != UA_BASE_DEVICE) {
+ pfxdata.validity.verify_base = 1;
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
+ pfxdata.validity.hyper_pav = 1;
+ }
+
+ switch (cmd) {
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ dedata->blk_size = blksize;
+ dedata->ga_extended |= 0x42;
+ lredata->operation.orientation = 0x0;
+ lredata->operation.operation = 0x0C;
+ lredata->auxiliary.check_bytes = 0x01;
+ pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ dedata->mask.perm = 0x02;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ dedata->blk_size = blksize;
+ rc = check_XRC_on_prefix(&pfxdata, basedev);
+ dedata->ga_extended |= 0x42;
+ lredata->operation.orientation = 0x0;
+ lredata->operation.operation = 0x3F;
+ lredata->extended_operation = 0x23;
+ lredata->auxiliary.check_bytes = 0x2;
+ pfx_cmd = DASD_ECKD_CCW_PFX;
+ break;
+ default:
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "prepare itcw, unknown opcode 0x%x", cmd);
+ BUG();
+ break;
+ }
+ if (rc)
+ return rc;
+
+ dedata->attributes.mode = 0x3; /* ECKD */
+
+ heads = basepriv->rdc_data.trk_per_cyl;
+ begcyl = trk / heads;
+ beghead = trk % heads;
+ endcyl = totrk / heads;
+ endhead = totrk % heads;
+
+ /* check for sequential prestage - enhance cylinder range */
+ if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
+ dedata->attributes.operation == DASD_SEQ_ACCESS) {
+
+ if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
+ endcyl += basepriv->attrib.nr_cyl;
+ else
+ endcyl = (basepriv->real_cyl - 1);
+ }
+
+ set_ch_t(&dedata->beg_ext, begcyl, beghead);
+ set_ch_t(&dedata->end_ext, endcyl, endhead);
+
+ dedata->ep_format = 0x20; /* records per track is valid */
+ dedata->ep_rec_per_track = blk_per_trk;
+
+ if (rec_on_trk) {
+ switch (basepriv->rdc_data.dev_type) {
+ case 0x3390:
+ dn = ceil_quot(blksize + 6, 232);
+ d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
+ sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+ break;
+ case 0x3380:
+ d = 7 + ceil_quot(blksize + 12, 32);
+ sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+ break;
+ }
+ }
+
+ lredata->auxiliary.length_valid = 1;
+ lredata->auxiliary.length_scope = 1;
+ lredata->auxiliary.imbedded_ccw_valid = 1;
+ lredata->length = tlf;
+ lredata->imbedded_ccw = cmd;
+ lredata->count = count;
+ lredata->sector = sector;
+ set_ch_t(&lredata->seek_addr, begcyl, beghead);
+ lredata->search_arg.cyl = lredata->seek_addr.cyl;
+ lredata->search_arg.head = lredata->seek_addr.head;
+ lredata->search_arg.record = rec_on_trk;
+
+ dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
+ &pfxdata, sizeof(pfxdata), total_data_size);
+
+ return rc;
+}
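
The 3390 branch of the sector estimate above can be checked by hand; a stand-alone sketch with example values only, where ceil_quot() is assumed to be the ceiling-division helper used elsewhere in dasd_eckd.c:

#include <stdio.h>

/* Assumption: ceil_quot(d1, d2) is ceiling division, (d1 + d2 - 1) / d2. */
static unsigned int ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + d2 - 1) / d2;
}

int main(void)
{
	/* Hypothetical example: 4096-byte blocks on a 3390, locating
	 * record 2 on the track, as prepare_itcw() does above.
	 */
	unsigned int blksize = 4096, rec_on_trk = 2;
	unsigned int dn, d, sector;

	dn = ceil_quot(blksize + 6, 232);			/* 18 */
	d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);		/* 133 */
	sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;	/* 24 */
	printf("sector estimate: %u\n", sector);
	return 0;
}
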
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+ struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req,
+ sector_t first_rec,
+ sector_t last_rec,
+ sector_t first_trk,
+ sector_t last_trk,
+ unsigned int first_offs,
+ unsigned int last_offs,
+ unsigned int blk_per_trk,
+ unsigned int blksize)
+{
+ struct dasd_eckd_private *private;
+ struct dasd_ccw_req *cqr;
+ struct req_iterator iter;
+ struct bio_vec *bv;
+ char *dst;
+ unsigned int trkcount, ctidaw;
+ unsigned char cmd;
+ struct dasd_device *basedev;
+ unsigned int tlf;
+ struct itcw *itcw;
+ struct tidaw *last_tidaw = NULL;
+ int itcw_op;
+ size_t itcw_size;
+
+ basedev = block->base;
+ private = (struct dasd_eckd_private *) basedev->private;
+ if (rq_data_dir(req) == READ) {
+ cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+ itcw_op = ITCW_OP_READ;
+ } else if (rq_data_dir(req) == WRITE) {
+ cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+ itcw_op = ITCW_OP_WRITE;
+ } else
+ return ERR_PTR(-EINVAL);
+
+ /* track-based I/O needs to address all memory via TIDAWs,
+ * not just for 64 bit addresses. This allows us to map
+ * each segment directly to one tidaw.
+ */
+ trkcount = last_trk - first_trk + 1;
+ ctidaw = 0;
+ rq_for_each_segment(bv, req, iter) {
+ ++ctidaw;
+ }
+
+ /* Allocate the ccw request. */
+ itcw_size = itcw_calc_size(0, ctidaw, 0);
+ cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+ 0, itcw_size, startdev);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ cqr->cpmode = 1;
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = 100*HZ;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->retries = 10;
+
+ /* transfer length factor: how many bytes to read from the last track */
+ if (first_trk == last_trk)
+ tlf = last_offs - first_offs + 1;
+ else
+ tlf = last_offs + 1;
+ tlf *= blksize;
+
+ itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
+ cqr->cpaddr = itcw_get_tcw(itcw);
+
+ if (prepare_itcw(itcw, first_trk, last_trk,
+ cmd, basedev, startdev,
+ first_offs + 1,
+ trkcount, blksize,
+ (last_rec - first_rec + 1) * blksize,
+ tlf, blk_per_trk) == -EAGAIN) {
+ /* Clock not in sync and XRC is enabled.
+ * Try again later.
+ */
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ /*
+ * A tidaw can address 4k of memory, but must not cross page boundaries
+ * We can let the block layer handle this by setting
+ * blk_queue_segment_boundary to page boundaries and
+ * blk_queue_max_segment_size to page size when setting up the request queue.
+ */
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
+ if (IS_ERR(last_tidaw))
+ return (struct dasd_ccw_req *)last_tidaw;
+ }
+
+ last_tidaw->flags |= 0x80;
+ itcw_finalize(itcw);
+
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->lpm = private->path_data.ppm;
+ cqr->retries = 256;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
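
The tidaw mapping above only works because the request queue never hands the driver a segment that crosses a page boundary, as the comment in the segment loop notes. The corresponding queue setup is not part of this hunk; a hedged sketch of what it presumably looks like, using the standard block-layer helpers from <linux/blkdev.h>:

/* Sketch only: constrain segments to single pages so that every bio
 * segment can be mapped to exactly one tidaw. Assumed to run while the
 * block device request queue is being set up.
 */
static void dasd_fcx_queue_setup_sketch(struct request_queue *q)
{
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);
}
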
+
+static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ int tpm, cmdrtd, cmdwtd;
+ int use_prefix;
+
+ struct dasd_eckd_private *private;
+ int fcx_in_css, fcx_in_gneq, fcx_in_features;
+ struct dasd_device *basedev;
+ sector_t first_rec, last_rec;
+ sector_t first_trk, last_trk;
+ unsigned int first_offs, last_offs;
+ unsigned int blk_per_trk, blksize;
+ int cdlspecial;
+ struct dasd_ccw_req *cqr;
+
+ basedev = block->base;
+ private = (struct dasd_eckd_private *) basedev->private;
+
+ /* Calculate number of blocks/records per track. */
+ blksize = block->bp_block;
+ blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+ /* Calculate record id of first and last block. */
+ first_rec = first_trk = req->sector >> block->s2b_shift;
+ first_offs = sector_div(first_trk, blk_per_trk);
+ last_rec = last_trk =
+ (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ last_offs = sector_div(last_trk, blk_per_trk);
+ cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
+
+ /* is transport mode supported ? */
+ fcx_in_css = css_general_characteristics.fcx;
+ fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+ fcx_in_features = private->features.feature[40] & 0x80;
+ tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+ /* is read track data and write track data in command mode supported? */
+ cmdrtd = private->features.feature[9] & 0x20;
+ cmdwtd = private->features.feature[12] & 0x40;
+ use_prefix = private->features.feature[8] & 0x01;
+
+ cqr = NULL;
+ if (cdlspecial || dasd_page_cache) {
+ /* do nothing, just fall through to the cmd mode single case */
+ } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
+ cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
+ first_rec, last_rec,
+ first_trk, last_trk,
+ first_offs, last_offs,
+ blk_per_trk, blksize);
+ if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
+ cqr = NULL;
+ } else if (use_prefix &&
+ (((rq_data_dir(req) == READ) && cmdrtd) ||
+ ((rq_data_dir(req) == WRITE) && cmdwtd))) {
+ cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
+ first_rec, last_rec,
+ first_trk, last_trk,
+ first_offs, last_offs,
+ blk_per_trk, blksize);
+ if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
+ cqr = NULL;
+ }
+ if (!cqr)
+ cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
+ first_rec, last_rec,
+ first_trk, last_trk,
+ first_offs, last_offs,
+ blk_per_trk, blksize);
+ return cqr;
+}
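
The first/last record and track computation at the top of the new dasd_eckd_build_cp() is the same shift-and-divide arithmetic the old code used. A stand-alone sketch with a hypothetical geometry (4096-byte blocks, i.e. s2b_shift = 3, and 12 blocks per track) showing how a request maps onto tracks and offsets:

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 200, nr_sectors = 64;
	unsigned int s2b_shift = 3, blk_per_trk = 12;
	unsigned long long first_rec, last_rec, first_trk, last_trk;
	unsigned int first_offs, last_offs;

	first_rec = sector >> s2b_shift;			/* 25 */
	last_rec = (sector + nr_sectors - 1) >> s2b_shift;	/* 32 */
	first_trk = first_rec / blk_per_trk;			/* 2 */
	first_offs = first_rec % blk_per_trk;			/* 1 */
	last_trk = last_rec / blk_per_trk;			/* 2 */
	last_offs = last_rec % blk_per_trk;			/* 8 */

	/* Both ends fall on track 2, so the single-track transport mode
	 * path above would be eligible for this request.
	 */
	printf("records %llu-%llu, tracks %llu-%llu, offsets %u/%u\n",
	       first_rec, last_rec, first_trk, last_trk,
	       first_offs, last_offs);
	return 0;
}
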
+
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
@@ -1767,7 +2448,7 @@ out:
}
/*
- * Modify ccw chain in cqr so it can be started on a base device.
+ * Modify ccw/tcw in cqr so it can be started on a base device.
*
* Note that this is not enough to restart the cqr!
* Either reset cqr->startdev as well (summary unit check handling)
@@ -1777,13 +2458,24 @@ void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
struct ccw1 *ccw;
struct PFX_eckd_data *pfxdata;
-
- ccw = cqr->cpaddr;
- pfxdata = cqr->data;
-
- if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
+ struct tcw *tcw;
+ struct tccb *tccb;
+ struct dcw *dcw;
+
+ if (cqr->cpmode == 1) {
+ tcw = cqr->cpaddr;
+ tccb = tcw_get_tccb(tcw);
+ dcw = (struct dcw *)&tccb->tca[0];
+ pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
pfxdata->validity.verify_base = 0;
pfxdata->validity.hyper_pav = 0;
+ } else {
+ ccw = cqr->cpaddr;
+ pfxdata = cqr->data;
+ if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
+ pfxdata->validity.verify_base = 0;
+ pfxdata->validity.hyper_pav = 0;
+ }
}
}
@@ -1861,6 +2553,7 @@ dasd_eckd_release(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
+ struct ccw1 *ccw;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -1868,14 +2561,15 @@ dasd_eckd_release(struct dasd_device *device)
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1, 32, device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
- cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
- cqr->cpaddr->flags |= CCW_FLAG_SLI;
- cqr->cpaddr->count = 32;
- cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 32;
+ ccw->cda = (__u32)(addr_t) cqr->data;
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -1902,6 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
+ struct ccw1 *ccw;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -1909,14 +2604,15 @@ dasd_eckd_reserve(struct dasd_device *device)
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1, 32, device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
- cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
- cqr->cpaddr->flags |= CCW_FLAG_SLI;
- cqr->cpaddr->count = 32;
- cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 32;
+ ccw->cda = (__u32)(addr_t) cqr->data;
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -1942,6 +2638,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
+ struct ccw1 *ccw;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -1949,14 +2646,15 @@ dasd_eckd_steal_lock(struct dasd_device *device)
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1, 32, device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
- cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
- cqr->cpaddr->flags |= CCW_FLAG_SLI;
- cqr->cpaddr->count = 32;
- cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SLCK;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 32;
+ ccw->cda = (__u32)(addr_t) cqr->data;
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -1990,7 +2688,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
sizeof(struct dasd_rssd_perf_stats_t)),
device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
@@ -2080,9 +2778,9 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
return -EFAULT;
private->attrib = attrib;
- DEV_MESSAGE(KERN_INFO, device,
- "cache operation mode set to %x (%i cylinder prestage)",
- private->attrib.operation, private->attrib.nr_cyl);
+ dev_info(&device->cdev->dev,
+ "The DASD cache mode was set to %x (%i cylinder prestage)\n",
+ private->attrib.operation, private->attrib.nr_cyl);
return 0;
}
@@ -2133,7 +2831,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
/* setup CCWs for PSF + RSSD */
cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
if (IS_ERR(cqr)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
rc = PTR_ERR(cqr);
goto out_free;
@@ -2242,11 +2940,54 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
return len;
}
+static void
+dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
+ struct irb *irb, char *reason)
+{
+ u64 *sense;
+ int sl;
+ struct tsb *tsb;
+
+ sense = NULL;
+ tsb = NULL;
+ if (req && scsw_is_tm(&req->irb.scsw)) {
+ if (irb->scsw.tm.tcw)
+ tsb = tcw_get_tsb(
+ (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
+ if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
+ switch (tsb->flags & 0x07) {
+ case 1: /* tsa_iostat */
+ sense = (u64 *)tsb->tsa.iostat.sense;
+ break;
+ case 2: /* ts_ddpc */
+ sense = (u64 *)tsb->tsa.ddpc.sense;
+ break;
+ case 3: /* tsa_intrg */
+ break;
+ }
+ }
+ } else {
+ if (irb->esw.esw0.erw.cons)
+ sense = (u64 *)irb->ecw;
+ }
+ if (sense) {
+ for (sl = 0; sl < 4; sl++) {
+ DBF_DEV_EVENT(DBF_EMERG, device,
+ "%s: %016llx %016llx %016llx %016llx",
+ reason, sense[0], sense[1], sense[2],
+ sense[3]);
+ }
+ } else {
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s",
+ "SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+}
+
/*
* Print sense data and related channel program.
* Parts are printed because printk buffer is only 1024 bytes.
*/
-static void dasd_eckd_dump_sense(struct dasd_device *device,
+static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
char *page;
@@ -2255,8 +2996,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
- DEV_MESSAGE(KERN_ERR, device, " %s",
- "No memory to dump sense data");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "No memory to dump sense data\n");
return;
}
/* dump the sense data */
@@ -2265,7 +3006,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
- irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+ scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
@@ -2341,6 +3082,147 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
free_page((unsigned long) page);
}
+
+/*
+ * Print sense data from a tcw.
+ */
+static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
+ struct dasd_ccw_req *req, struct irb *irb)
+{
+ char *page;
+ int len, sl, sct, residual;
+
+ struct tsb *tsb;
+ u8 *sense;
+
+
+ page = (char *) get_zeroed_page(GFP_ATOMIC);
+ if (page == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, " %s",
+ "No memory to dump sense data");
+ return;
+ }
+ /* dump the sense data */
+ len = sprintf(page, KERN_ERR PRINTK_HEADER
+ " I/O status report for device %s:\n",
+ dev_name(&device->cdev->dev));
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " in req: %p CS: 0x%02X DS: 0x%02X "
+ "fcxs: 0x%02X schxs: 0x%02X\n", req,
+ scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
+ irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " device %s: Failing TCW: %p\n",
+ dev_name(&device->cdev->dev),
+ (void *) (addr_t) irb->scsw.tm.tcw);
+
+ tsb = NULL;
+ sense = NULL;
+ if (irb->scsw.tm.tcw)
+ tsb = tcw_get_tsb(
+ (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
+
+ if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->length %d\n", tsb->length);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->flags %x\n", tsb->flags);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->dcw_offset %d\n", tsb->dcw_offset);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->count %d\n", tsb->count);
+ residual = tsb->count - 28;
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " residual %d\n", residual);
+
+ switch (tsb->flags & 0x07) {
+ case 1: /* tsa_iostat */
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.iostat.dev_time %d\n",
+ tsb->tsa.iostat.dev_time);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.iostat.def_time %d\n",
+ tsb->tsa.iostat.def_time);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.iostat.queue_time %d\n",
+ tsb->tsa.iostat.queue_time);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.iostat.dev_busy_time %d\n",
+ tsb->tsa.iostat.dev_busy_time);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.iostat.dev_act_time %d\n",
+ tsb->tsa.iostat.dev_act_time);
+ sense = tsb->tsa.iostat.sense;
+ break;
+ case 2: /* ts_ddpc */
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.ddpc.rcq: ");
+ for (sl = 0; sl < 16; sl++) {
+ for (sct = 0; sct < 8; sct++) {
+ len += sprintf(page + len, " %02x",
+ tsb->tsa.ddpc.rcq[sl]);
+ }
+ len += sprintf(page + len, "\n");
+ }
+ sense = tsb->tsa.ddpc.sense;
+ break;
+ case 3: /* tsa_intrg */
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " tsb->tsa.intrg.: not supported yet\n");
+ break;
+ }
+
+ if (sense) {
+ for (sl = 0; sl < 4; sl++) {
+ len += sprintf(page + len,
+ KERN_ERR PRINTK_HEADER
+ " Sense(hex) %2d-%2d:",
+ (8 * sl), ((8 * sl) + 7));
+ for (sct = 0; sct < 8; sct++) {
+ len += sprintf(page + len, " %02x",
+ sense[8 * sl + sct]);
+ }
+ len += sprintf(page + len, "\n");
+ }
+
+ if (sense[27] & DASD_SENSE_BIT_0) {
+ /* 24 Byte Sense Data */
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " 24 Byte: %x MSG %x, "
+ "%s MSGb to SYSOP\n",
+ sense[7] >> 4, sense[7] & 0x0f,
+ sense[1] & 0x10 ? "" : "no");
+ } else {
+ /* 32 Byte Sense Data */
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " 32 Byte: Format: %x "
+ "Exception class %x\n",
+ sense[6] & 0x0f, sense[22] >> 4);
+ }
+ } else {
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+ } else {
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " SORRY - NO TSB DATA AVAILABLE\n");
+ }
+ printk("%s", page);
+ free_page((unsigned long) page);
+}
+
+static void dasd_eckd_dump_sense(struct dasd_device *device,
+ struct dasd_ccw_req *req, struct irb *irb)
+{
+ if (req && scsw_is_tm(&req->irb.scsw))
+ dasd_eckd_dump_sense_tcw(device, req, irb);
+ else
+ dasd_eckd_dump_sense_ccw(device, req, irb);
+}
+
+
/*
* max_blocks is dependent on the amount of storage that is available
* in the static io buffer for each device. Currently each device has
@@ -2375,6 +3257,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.build_cp = dasd_eckd_build_alias_cp,
.free_cp = dasd_eckd_free_alias_cp,
.dump_sense = dasd_eckd_dump_sense,
+ .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
.fill_info = dasd_eckd_fill_info,
.ioctl = dasd_eckd_ioctl,
};
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 2476f87d21d0..ad45bcac3ce4 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -38,8 +38,11 @@
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
+#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
+#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
#define DASD_ECKD_CCW_RESERVE 0xB4
#define DASD_ECKD_CCW_PFX 0xE7
+#define DASD_ECKD_CCW_PFX_READ 0xEA
#define DASD_ECKD_CCW_RSCK 0xF9
/*
@@ -48,6 +51,11 @@
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_SSC 0x1D
+/*
+ * Size that is reported for large volumes in the old 16-bit no_cyl field
+ */
+#define LV_COMPAT_CYL 0xFFFE
+
/*****************************************************************************
* SECTION: Type Definitions
****************************************************************************/
@@ -118,7 +126,9 @@ struct DE_eckd_data {
unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
__u8 ep_format; /* Extended Parameter format byte */
__u8 ep_prio; /* Extended Parameter priority I/O byte */
- __u8 ep_reserved[6]; /* Extended Parameter Reserved */
+ __u8 ep_reserved1; /* Extended Parameter Reserved */
+ __u8 ep_rec_per_track; /* Number of records on a track */
+ __u8 ep_reserved[4]; /* Extended Parameter Reserved */
} __attribute__ ((packed));
struct LO_eckd_data {
@@ -139,11 +149,37 @@ struct LO_eckd_data {
__u16 length;
} __attribute__ ((packed));
+struct LRE_eckd_data {
+ struct {
+ unsigned char orientation:2;
+ unsigned char operation:6;
+ } __attribute__ ((packed)) operation;
+ struct {
+ unsigned char length_valid:1;
+ unsigned char length_scope:1;
+ unsigned char imbedded_ccw_valid:1;
+ unsigned char check_bytes:2;
+ unsigned char imbedded_count_valid:1;
+ unsigned char reserved:1;
+ unsigned char read_count_suffix:1;
+ } __attribute__ ((packed)) auxiliary;
+ __u8 imbedded_ccw;
+ __u8 count;
+ struct ch_t seek_addr;
+ struct chr_t search_arg;
+ __u8 sector;
+ __u16 length;
+ __u8 imbedded_count;
+ __u8 extended_operation;
+ __u16 extended_parameter_length;
+ __u8 extended_parameter[0];
+} __attribute__ ((packed));
+
/* Prefix data for format 0x00 and 0x01 */
struct PFX_eckd_data {
unsigned char format;
struct {
- unsigned char define_extend:1;
+ unsigned char define_extent:1;
unsigned char time_stamp:1;
unsigned char verify_base:1;
unsigned char hyper_pav:1;
@@ -153,9 +189,8 @@ struct PFX_eckd_data {
__u8 aux;
__u8 base_lss;
__u8 reserved[7];
- struct DE_eckd_data define_extend;
- struct LO_eckd_data locate_record;
- __u8 LO_extended_data[4];
+ struct DE_eckd_data define_extent;
+ struct LRE_eckd_data locate_record;
} __attribute__ ((packed));
struct dasd_eckd_characteristics {
@@ -228,7 +263,8 @@ struct dasd_eckd_characteristics {
__u8 factor7;
__u8 factor8;
__u8 reserved2[3];
- __u8 reserved3[10];
+ __u8 reserved3[6];
+ __u32 long_no_cyl;
} __attribute__ ((packed));
/* elements of the configuration data */
@@ -406,6 +442,7 @@ struct dasd_eckd_private {
int uses_cdl;
struct attrib_data_t attrib; /* e.g. cache operations */
struct dasd_rssd_features features;
+ u32 real_cyl;
 /* alias management */
struct dasd_uid uid;
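
LV_COMPAT_CYL, the new long_no_cyl field and the real_cyl member added in this header fit together: large volumes report 0xFFFE in the old 16-bit cylinder field and the true count in long_no_cyl. A sketch of how a caller would presumably derive the real cylinder count follows; the helper itself is hypothetical, and no_cyl is the pre-existing 16-bit field of struct dasd_eckd_characteristics:

/* Sketch only: pick the 32-bit cylinder count when the 16-bit field
 * carries the large-volume compatibility value.
 */
static unsigned int eckd_real_cyl_sketch(struct dasd_eckd_characteristics *rdc)
{
	if (rdc->no_cyl == LV_COMPAT_CYL)
		return rdc->long_no_cyl;
	return rdc->no_cyl;
}
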
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index f8e05ce98621..c24c8c30380d 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -6,6 +6,8 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -297,11 +299,12 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
+ char *sense;
/* go through cqr chain and count the valid sense data sets */
data_size = 0;
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
- if (temp_cqr->irb.esw.esw0.erw.cons)
+ if (dasd_get_sense(&temp_cqr->irb))
data_size += 32;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
@@ -316,9 +319,11 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
- for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
- if (temp_cqr->irb.esw.esw0.erw.cons)
- dasd_eer_write_buffer(eerb, cqr->irb.ecw, 32);
+ for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
+ sense = dasd_get_sense(&temp_cqr->irb);
+ if (sense)
+ dasd_eer_write_buffer(eerb, sense, 32);
+ }
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
@@ -451,6 +456,7 @@ int dasd_eer_enable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
+ struct ccw1 *ccw;
if (device->eer_cqr)
return 0;
@@ -468,10 +474,11 @@ int dasd_eer_enable(struct dasd_device *device)
cqr->expires = 10 * HZ;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
- cqr->cpaddr->count = SNSS_DATA_SIZE;
- cqr->cpaddr->flags = 0;
- cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SNSS;
+ ccw->count = SNSS_DATA_SIZE;
+ ccw->flags = 0;
+ ccw->cda = (__u32)(addr_t) cqr->data;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
@@ -534,7 +541,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
kfree(eerb);
- MESSAGE(KERN_WARNING, "can't open device since module "
+ DBF_EVENT(DBF_WARNING, "can't open device since module "
"parameter eer_pages is smaller than 1 or"
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
unlock_kernel();
@@ -687,7 +694,7 @@ int __init dasd_eer_init(void)
if (rc) {
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
- MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
+ DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
"register misc device");
return rc;
}
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 8f10000851a3..d970ce2814be 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -9,6 +9,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/ctype.h>
#include <linux/init.h>
@@ -91,14 +93,14 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
/* just retry - there is nothing to save ... I got no sense data.... */
if (cqr->retries > 0) {
- DEV_MESSAGE (KERN_DEBUG, device,
+ DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP called (%i retries left)",
cqr->retries);
cqr->lpm = LPM_ANYPATH;
cqr->status = DASD_CQR_FILLED;
} else {
- DEV_MESSAGE (KERN_WARNING, device, "%s",
- "default ERP called (NO retry left)");
+ dev_err(&device->cdev->dev,
+ "default ERP has run out of retries and failed\n");
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
}
@@ -162,8 +164,21 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device->discipline->dump_sense(device, cqr, irb);
}
+void
+dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+ struct dasd_device *device;
+
+ device = cqr->startdev;
+ /* dump sense data to the s390 debug feature */
+ if (device->discipline && device->discipline->dump_sense_dbf)
+ device->discipline->dump_sense_dbf(device, cqr, irb, "log");
+}
+EXPORT_SYMBOL(dasd_log_sense_dbf);
+
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);
+
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f1d176021694..a3eb6fd14673 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -6,6 +6,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/debug.h>
@@ -128,17 +130,18 @@ dasd_fba_check_characteristics(struct dasd_device *device)
private = kzalloc(sizeof(struct dasd_fba_private),
GFP_KERNEL | GFP_DMA);
if (private == NULL) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "memory allocation failed for private "
- "data");
+ dev_warn(&device->cdev->dev,
+ "Allocating memory for private DASD "
+ "data failed\n");
return -ENOMEM;
}
device->private = (void *) private;
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DEV_MESSAGE(KERN_WARNING, device, "%s",
- "could not allocate dasd block structure");
+ DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
+ "structure for device: %s",
+ dev_name(&device->cdev->dev));
device->private = NULL;
kfree(private);
return PTR_ERR(block);
@@ -150,9 +153,9 @@ dasd_fba_check_characteristics(struct dasd_device *device)
rdc_data = (void *) &(private->rdc_data);
rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
if (rc) {
- DEV_MESSAGE(KERN_WARNING, device,
- "Read device characteristics returned error %d",
- rc);
+ DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
+ "error %d for device: %s",
+ rc, dev_name(&device->cdev->dev));
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
@@ -160,15 +163,16 @@ dasd_fba_check_characteristics(struct dasd_device *device)
return rc;
}
- DEV_MESSAGE(KERN_INFO, device,
- "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)",
- cdev->id.dev_type,
- cdev->id.dev_model,
- cdev->id.cu_type,
- cdev->id.cu_model,
- ((private->rdc_data.blk_bdsa *
- (private->rdc_data.blk_size >> 9)) >> 11),
- private->rdc_data.blk_size);
+ dev_info(&device->cdev->dev,
+ "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
+ "and %d B/blk\n",
+ cdev->id.dev_type,
+ cdev->id.dev_model,
+ cdev->id.cu_type,
+ cdev->id.cu_model,
+ ((private->rdc_data.blk_bdsa *
+ (private->rdc_data.blk_size >> 9)) >> 11),
+ private->rdc_data.blk_size);
return 0;
}
@@ -180,7 +184,7 @@ static int dasd_fba_do_analysis(struct dasd_block *block)
private = (struct dasd_fba_private *) block->base->private;
rc = dasd_check_blocksize(private->rdc_data.blk_size);
if (rc) {
- DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d",
+ DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
private->rdc_data.blk_size);
return rc;
}
@@ -215,7 +219,7 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
if (cqr->function == dasd_default_erp_action)
return dasd_default_erp_postaction;
- DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p",
+ DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
cqr->function);
return NULL;
}
@@ -233,9 +237,9 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
}
/* check for unsolicited interrupts */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"unsolicited interrupt received");
- device->discipline->dump_sense(device, NULL, irb);
+ device->discipline->dump_sense_dbf(device, NULL, irb, "unsolicited");
dasd_schedule_device_bh(device);
return;
};
@@ -437,6 +441,25 @@ dasd_fba_fill_info(struct dasd_device * device,
}
static void
+dasd_fba_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
+ struct irb *irb, char *reason)
+{
+ int sl;
+ if (irb->esw.esw0.erw.cons) {
+ for (sl = 0; sl < 4; sl++) {
+ DBF_DEV_EVENT(DBF_EMERG, device,
+ "%s: %08x %08x %08x %08x",
+ reason, irb->ecw[8 * 0], irb->ecw[8 * 1],
+ irb->ecw[8 * 2], irb->ecw[8 * 3]);
+ }
+ } else {
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s",
+ "SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+}
+
+
+static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
@@ -446,7 +469,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
- DEV_MESSAGE(KERN_ERR, device, " %s",
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to dump sense data");
return;
}
@@ -476,8 +499,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
- MESSAGE_LOG(KERN_ERR, "%s",
- page + sizeof(KERN_ERR PRINTK_HEADER));
+ printk(KERN_ERR "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
@@ -498,8 +520,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
act++;
}
- MESSAGE_LOG(KERN_ERR, "%s",
- page + sizeof(KERN_ERR PRINTK_HEADER));
+ printk(KERN_ERR "%s", page);
/* print failing CCW area */
@@ -540,8 +561,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
act++;
}
if (len > 0)
- MESSAGE_LOG(KERN_ERR, "%s",
- page + sizeof(KERN_ERR PRINTK_HEADER));
+ printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
@@ -576,6 +596,7 @@ static struct dasd_discipline dasd_fba_discipline = {
.build_cp = dasd_fba_build_cp,
.free_cp = dasd_fba_free_cp,
.dump_sense = dasd_fba_dump_sense,
+ .dump_sense_dbf = dasd_fba_dump_sense_dbf,
.fill_info = dasd_fba_fill_info,
};
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index e99d566b69cc..d3198303b93c 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -11,6 +11,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
@@ -163,9 +165,8 @@ int dasd_gendisk_init(void)
/* Register to static dasd major 94 */
rc = register_blkdev(DASD_MAJOR, "dasd");
if (rc != 0) {
- MESSAGE(KERN_WARNING,
- "Couldn't register successfully to "
- "major no %d", DASD_MAJOR);
+ pr_warning("Registering the device driver with major number "
+ "%d failed\n", DASD_MAJOR);
return rc;
}
return 0;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 4a39084d9c95..c1e487f774c6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -112,6 +112,9 @@ do { \
d_data); \
} while(0)
+/* limit size for an error string */
+#define ERRORLENGTH 30
+
/* definition of dbf debug levels */
#define DBF_EMERG 0 /* system is unusable */
#define DBF_ALERT 1 /* action must be taken immediately */
@@ -157,7 +160,8 @@ struct dasd_ccw_req {
struct dasd_block *block; /* the originating block device */
struct dasd_device *memdev; /* the device used to allocate this */
struct dasd_device *startdev; /* device the request is started on */
- struct ccw1 *cpaddr; /* address of channel program */
+ void *cpaddr; /* address of ccw or tcw */
+ unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
char status; /* status of this request */
short retries; /* A retry counter */
unsigned long flags; /* flags of this request */
@@ -280,6 +284,8 @@ struct dasd_discipline {
dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
struct irb *);
+ void (*dump_sense_dbf) (struct dasd_device *, struct dasd_ccw_req *,
+ struct irb *, char *);
void (*handle_unsolicited_interrupt) (struct dasd_device *,
struct irb *);
@@ -378,7 +384,7 @@ struct dasd_block {
struct block_device *bdev;
atomic_t open_count;
- unsigned long blocks; /* size of volume in blocks */
+ unsigned long long blocks; /* size of volume in blocks */
unsigned int bp_block; /* bytes per block */
unsigned int s2b_shift; /* log2 (bp_block/512) */
@@ -573,12 +579,14 @@ int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
+char *dasd_get_sense(struct irb *);
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
extern int dasd_probeonly;
extern int dasd_autodetect;
extern int dasd_nopav;
+extern int dasd_nofcx;
int dasd_devmap_init(void);
void dasd_devmap_exit(void);
@@ -623,6 +631,7 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
struct dasd_device *);
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
+void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
/* externals in dasd_3990_erp.c */
struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index b82d816d9ef7..4ce3f72ee1c1 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -9,6 +9,9 @@
*
* i/o controls for the dasd driver.
*/
+
+#define KMSG_COMPONENT "dasd"
+
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/fs.h>
@@ -94,7 +97,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
- DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device");
+ dev_info(&base->cdev->dev, "The DASD has been put in the quiesce "
+ "state\n");
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped |= DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -103,7 +107,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
/*
- * Quiesce device.
+ * Resume device.
*/
static int dasd_ioctl_resume(struct dasd_block *block)
{
@@ -114,7 +118,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
- DEV_MESSAGE(KERN_DEBUG, base, "%s", "resume IO on device");
+ dev_info(&base->cdev->dev, "I/O operations have been resumed "
+ "on the DASD\n");
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -140,13 +145,13 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
- DEV_MESSAGE(KERN_WARNING, base, "%s",
- "dasd_format: device is not disabled! ");
+ dev_warn(&base->cdev->dev,
+ "The DASD cannot be formatted while it is enabled\n");
return -EBUSY;
}
DBF_DEV_EVENT(DBF_NOTICE, base,
- "formatting units %d to %d (%d B blocks) flags %d",
+ "formatting units %u to %u (%u B blocks) flags %u",
fdata->start_unit,
fdata->stop_unit, fdata->blksize, fdata->intensity);
@@ -169,10 +174,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
dasd_sfree_request(cqr, cqr->memdev);
if (rc) {
if (rc != -ERESTARTSYS)
- DEV_MESSAGE(KERN_ERR, base,
- " Formatting of unit %d failed "
- "with rc = %d",
- fdata->start_unit, rc);
+ dev_err(&base->cdev->dev,
+ "Formatting unit %d failed with "
+ "rc=%d\n", fdata->start_unit, rc);
return rc;
}
fdata->start_unit++;
@@ -199,8 +203,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
return -EFAULT;
if (bdev != bdev->bd_contains) {
- DEV_MESSAGE(KERN_WARNING, block->base, "%s",
- "Cannot low-level format a partition");
+ dev_warn(&block->base->cdev->dev,
+ "The specified DASD is a partition and cannot be "
+ "formatted\n");
return -EINVAL;
}
return dasd_format(block, &fdata);
@@ -365,9 +370,9 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
return ret;
}
-int
-dasd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int
+dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct dasd_block *block = bdev->bd_disk->private_data;
void __user *argp = (void __user *)arg;
@@ -420,3 +425,14 @@ dasd_ioctl(struct block_device *bdev, fmode_t mode,
return -EINVAL;
}
}
+
+int dasd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc;
+
+ lock_kernel();
+ rc = dasd_do_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+ return rc;
+}
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bf6fd348f20e..654daa3cdfda 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -11,6 +11,8 @@
*
*/
+#define KMSG_COMPONENT "dasd"
+
#include <linux/ctype.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
@@ -112,7 +114,7 @@ dasd_devices_show(struct seq_file *m, void *v)
seq_printf(m, "n/f ");
else
seq_printf(m,
- "at blocksize: %d, %ld blocks, %ld MB",
+ "at blocksize: %d, %lld blocks, %lld MB",
block->bp_block, block->blocks,
((block->bp_block >> 9) *
block->blocks) >> 11);
@@ -267,7 +269,7 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
buffer = dasd_get_user_string(user_buf, user_len);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- MESSAGE_LOG(KERN_INFO, "/proc/dasd/statictics: '%s'", buffer);
+ DBF_EVENT(DBF_DEBUG, "/proc/dasd/statistics: '%s'\n", buffer);
/* check for valid verbs */
for (str = buffer; isspace(*str); str++);
@@ -277,33 +279,33 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
dasd_profile_level = DASD_PROFILE_ON;
- MESSAGE(KERN_INFO, "%s", "Statistics switched on");
+ pr_info("The statistics feature has been switched "
+ "on\n");
} else if (strcmp(str, "off") == 0) {
/* switch off and reset statistics profiling */
memset(&dasd_global_profile,
0, sizeof (struct dasd_profile_info_t));
dasd_profile_level = DASD_PROFILE_OFF;
- MESSAGE(KERN_INFO, "%s", "Statistics switched off");
+ pr_info("The statistics feature has been switched "
+ "off\n");
} else
goto out_error;
} else if (strncmp(str, "reset", 5) == 0) {
/* reset the statistics */
memset(&dasd_global_profile, 0,
sizeof (struct dasd_profile_info_t));
- MESSAGE(KERN_INFO, "%s", "Statistics reset");
+ pr_info("The statistics have been reset\n");
} else
goto out_error;
kfree(buffer);
return user_len;
out_error:
- MESSAGE(KERN_WARNING, "%s",
- "/proc/dasd/statistics: only 'set on', 'set off' "
- "and 'reset' are supported verbs");
+ pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
+ str);
kfree(buffer);
return -EINVAL;
#else
- MESSAGE(KERN_WARNING, "%s",
- "/proc/dasd/statistics: is not activated in this kernel");
+ pr_warning("/proc/dasd/statistics is not activated in this kernel\n");
return user_len;
#endif /* CONFIG_DASD_PROFILE */
}
@@ -318,7 +320,6 @@ dasd_proc_init(void)
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
if (!dasd_proc_root_entry)
goto out_nodasd;
- dasd_proc_root_entry->owner = THIS_MODULE;
dasd_devices_entry = proc_create("devices",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
@@ -332,7 +333,6 @@ dasd_proc_init(void)
goto out_nostatistics;
dasd_statistics_entry->read_proc = dasd_statistics_read;
dasd_statistics_entry->write_proc = dasd_statistics_write;
- dasd_statistics_entry->owner = THIS_MODULE;
return 0;
out_nostatistics:
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index d0d565a05dfe..c07809c8016a 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -324,8 +324,6 @@ static inline void tape_proc_cleanup (void) {;}
#endif
/* a function for dumping device sense info */
-extern void tape_dump_sense(struct tape_device *, struct tape_request *,
- struct irb *);
extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
struct irb *);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 22ca34361ed7..807ded5eb049 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -8,6 +8,8 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
@@ -18,8 +20,6 @@
#include "tape.h"
#include "tape_std.h"
-#define PRINTK_HEADER "TAPE_34XX: "
-
/*
* Pointer to debug area.
*/
@@ -203,8 +203,7 @@ tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
tape_34xx_schedule_work(device, TO_MSEN);
} else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
- PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
- tape_dump_sense(device, NULL, irb);
+ tape_dump_sense_dbf(device, NULL, irb);
}
return TAPE_IO_SUCCESS;
}
@@ -226,9 +225,7 @@ tape_34xx_erp_read_opposite(struct tape_device *device,
tape_std_read_backward(device, request);
return tape_34xx_erp_retry(request);
}
- if (request->op != TO_RBA)
- PRINT_ERR("read_opposite called with state:%s\n",
- tape_op_verbose[request->op]);
+
/*
 * We tried to read forward and backward, but had no
* success -> failed.
@@ -241,13 +238,9 @@ tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
struct irb *irb, int no)
{
if (request->op != TO_ASSIGN) {
- PRINT_WARN("An unexpected condition #%d was caught in "
- "tape error recovery.\n", no);
- PRINT_WARN("Please report this incident.\n");
- if (request)
- PRINT_WARN("Operation of tape:%s\n",
- tape_op_verbose[request->op]);
- tape_dump_sense(device, request, irb);
+ dev_err(&device->cdev->dev, "An unexpected condition %d "
+ "occurred in tape error recovery\n", no);
+ tape_dump_sense_dbf(device, request, irb);
}
return tape_34xx_erp_failed(request, -EIO);
}
@@ -261,9 +254,8 @@ tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
if (irb->ecw[3] == 0x40) {
- PRINT_WARN ("Data overrun error between control-unit "
- "and drive. Use a faster channel connection, "
- "if possible! \n");
+ dev_warn (&device->cdev->dev, "A data overrun occurred between"
+ " the control unit and tape unit\n");
return tape_34xx_erp_failed(request, -EIO);
}
return tape_34xx_erp_bug(device, request, irb, -1);
@@ -280,7 +272,8 @@ tape_34xx_erp_sequence(struct tape_device *device,
/*
* cu detected incorrect block-id sequence on tape.
*/
- PRINT_WARN("Illegal block-id sequence found!\n");
+ dev_warn (&device->cdev->dev, "The block ID sequence on the "
+ "tape is incorrect\n");
return tape_34xx_erp_failed(request, -EIO);
}
/*
@@ -393,8 +386,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
/* Writing at physical end of volume */
return tape_34xx_erp_failed(request, -ENOSPC);
default:
- PRINT_ERR("Invalid op in %s:%i\n",
- __func__, __LINE__);
return tape_34xx_erp_failed(request, 0);
}
}
@@ -420,7 +411,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
irb, -4);
/* data check is permanent, CU recovery has failed */
- PRINT_WARN("Permanent read error\n");
+ dev_warn (&device->cdev->dev, "A read error occurred "
+ "that cannot be recovered\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x25:
// a write data check occurred
@@ -433,22 +425,26 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
irb, -5);
// data check is permanent, cu-recovery has failed
- PRINT_WARN("Permanent write error\n");
+ dev_warn (&device->cdev->dev, "A write error on the "
+ "tape cannot be recovered\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x26:
/* Data Check (read opposite) occurred. */
return tape_34xx_erp_read_opposite(device, request);
case 0x28:
/* ID-Mark at tape start couldn't be written */
- PRINT_WARN("ID-Mark could not be written.\n");
+ dev_warn (&device->cdev->dev, "Writing the ID-mark "
+ "failed\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x31:
/* Tape void. Tried to read beyond end of device. */
- PRINT_WARN("Read beyond end of recorded area.\n");
+ dev_warn (&device->cdev->dev, "Reading the tape beyond"
+ " the end of the recorded area failed\n");
return tape_34xx_erp_failed(request, -ENOSPC);
case 0x41:
/* Record sequence error. */
- PRINT_WARN("Invalid block-id sequence found.\n");
+ dev_warn (&device->cdev->dev, "The tape contains an "
+ "incorrect block ID sequence\n");
return tape_34xx_erp_failed(request, -EIO);
default:
/* all data checks for 3480 should result in one of
@@ -470,16 +466,12 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
switch (sense[3]) {
case 0x00:
/* Unit check with erpa code 0. Report and ignore. */
- PRINT_WARN("Non-error sense was found. "
- "Unit-check will be ignored.\n");
return TAPE_IO_SUCCESS;
case 0x21:
/*
* Data streaming not operational. CU will switch to
* interlock mode. Reissue the command.
*/
- PRINT_WARN("Data streaming not operational. "
- "Switching to interlock-mode.\n");
return tape_34xx_erp_retry(request);
case 0x22:
/*
@@ -487,11 +479,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* error on the lower interface, internal path not usable,
* or error during cartridge load.
*/
- PRINT_WARN("A path equipment check occurred. One of the "
- "following conditions occurred:\n");
- PRINT_WARN("drive adapter error, buffer error on the lower "
- "interface, internal path not usable, error "
- "during cartridge load.\n");
+ dev_warn (&device->cdev->dev, "A path equipment check occurred"
+ " for the tape device\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x24:
/*
@@ -514,7 +503,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* but the hardware isn't capable to do idrc, or a perform
* subsystem func is issued and the CU is not on-line.
*/
- PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x2a:
/*
@@ -552,23 +540,26 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* reading the format id mark or that that format specified
* is not supported by the drive.
*/
- PRINT_WARN("Drive not capable processing the tape format!\n");
+ dev_warn (&device->cdev->dev, "The tape unit cannot process "
+ "the tape format\n");
return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
case 0x30:
/* The medium is write protected. */
- PRINT_WARN("Medium is write protected!\n");
+ dev_warn (&device->cdev->dev, "The tape medium is write-"
+ "protected\n");
return tape_34xx_erp_failed(request, -EACCES);
case 0x32:
// Tension loss. We cannot recover this, it's an I/O error.
- PRINT_WARN("The drive lost tape tension.\n");
+ dev_warn (&device->cdev->dev, "The tape does not have the "
+ "required tape tension\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x33:
/*
* Load Failure. The cartridge was not inserted correctly or
* the tape is not threaded correctly.
*/
- PRINT_WARN("Cartridge load failure. Reload the cartridge "
- "and try again.\n");
+ dev_warn (&device->cdev->dev, "The tape unit failed to load"
+ " the cartridge\n");
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case 0x34:
@@ -576,8 +567,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* Unload failure. The drive cannot maintain tape tension
* and control tape movement during an unload operation.
*/
- PRINT_WARN("Failure during cartridge unload. "
- "Please try manually.\n");
+ dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
+ " cartridge failed\n");
if (request->op == TO_RUN)
return tape_34xx_erp_failed(request, -EIO);
return tape_34xx_erp_bug(device, request, irb, sense[3]);
@@ -589,8 +580,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* - the cartridge loader does not respond correctly
* - a failure occurs during an index, load, or unload cycle
*/
- PRINT_WARN("Equipment check! Please check the drive and "
- "the cartridge loader.\n");
+ dev_warn (&device->cdev->dev, "An equipment check has occurred"
+ " on the tape unit\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x36:
if (device->cdev->id.driver_info == tape_3490)
@@ -603,7 +594,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* Tape length error. The tape is shorter than reported in
* the beginning-of-tape data.
*/
- PRINT_WARN("Tape length error.\n");
+ dev_warn (&device->cdev->dev, "The tape information states an"
+ " incorrect length\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x38:
/*
@@ -620,12 +612,12 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
return tape_34xx_erp_failed(request, -EIO);
case 0x3a:
/* Drive switched to not ready. */
- PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
- "to ready position and try again.\n");
+ dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x3b:
/* Manual rewind or unload. This causes an I/O error. */
- PRINT_WARN("Medium was rewound or unloaded manually.\n");
+ dev_warn (&device->cdev->dev, "The tape medium has been "
+ "rewound or unloaded manually\n");
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case 0x42:
@@ -633,7 +625,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* Degraded mode. A condition that can cause degraded
* performance is detected.
*/
- PRINT_WARN("Subsystem is running in degraded mode.\n");
+ dev_warn (&device->cdev->dev, "The tape subsystem is running "
+ "in degraded mode\n");
return tape_34xx_erp_retry(request);
case 0x43:
/* Drive not ready. */
@@ -652,7 +645,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
break;
}
}
- PRINT_WARN("The drive is not ready.\n");
return tape_34xx_erp_failed(request, -ENOMEDIUM);
case 0x44:
/* Locate Block unsuccessful. */
@@ -663,7 +655,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
return tape_34xx_erp_failed(request, -EIO);
case 0x45:
/* The drive is assigned to a different channel path. */
- PRINT_WARN("The drive is assigned elsewhere.\n");
+ dev_warn (&device->cdev->dev, "The tape unit is already "
+ "assigned\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x46:
/*
@@ -671,11 +664,12 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* the power supply may be switched off or
* the drive address may not be set correctly.
*/
- PRINT_WARN("The drive is not on-line.");
+ dev_warn (&device->cdev->dev, "The tape unit is not online\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x47:
/* Volume fenced. CU reports volume integrity is lost. */
- PRINT_WARN("Volume fenced. The volume integrity is lost.\n");
+ dev_warn (&device->cdev->dev, "The control unit has fenced "
+ "access to the tape volume\n");
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case 0x48:
@@ -683,20 +677,21 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
return tape_34xx_erp_retry(request);
case 0x49:
/* Bus out check. A parity check error on the bus was found. */
- PRINT_WARN("Bus out check. A data transfer over the bus "
- "has been corrupted.\n");
+ dev_warn (&device->cdev->dev, "A parity error occurred on the "
+ "tape bus\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x4a:
/* Control unit erp failed. */
- PRINT_WARN("The control unit I/O error recovery failed.\n");
+ dev_warn (&device->cdev->dev, "I/O error recovery failed on "
+ "the tape control unit\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x4b:
/*
* CU and drive incompatible. The drive requests micro-program
* patches, which are not available on the CU.
*/
- PRINT_WARN("The drive needs microprogram patches from the "
- "control unit, which are not available.\n");
+ dev_warn (&device->cdev->dev, "The tape unit requires a "
+ "firmware update\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x4c:
/*
@@ -721,8 +716,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* the block to be written is larger than allowed for
* buffered mode.
*/
- PRINT_WARN("Maximum block size for buffered "
- "mode exceeded.\n");
+ dev_warn (&device->cdev->dev, "The maximum block size"
+ " for buffered mode is exceeded\n");
return tape_34xx_erp_failed(request, -ENOBUFS);
}
/* This erpa is reserved for 3480. */
@@ -759,22 +754,20 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
return tape_34xx_erp_retry(request);
case 0x55:
/* Channel interface recovery (permanent). */
- PRINT_WARN("A permanent channel interface error occurred.\n");
+ dev_warn (&device->cdev->dev, "A channel interface error cannot be"
+ " recovered\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x56:
/* Channel protocol error. */
- PRINT_WARN("A channel protocol error occurred.\n");
+ dev_warn (&device->cdev->dev, "A channel protocol error "
+ "occurred\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x57:
if (device->cdev->id.driver_info == tape_3480) {
/* Attention intercept. */
- PRINT_WARN("An attention intercept occurred, "
- "which will be recovered.\n");
return tape_34xx_erp_retry(request);
} else {
/* Global status intercept. */
- PRINT_WARN("An global status intercept was received, "
- "which will be recovered.\n");
return tape_34xx_erp_retry(request);
}
case 0x5a:
@@ -782,42 +775,31 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
* Tape length incompatible. The tape inserted is too long,
* which could cause damage to the tape or the drive.
*/
- PRINT_WARN("Tape Length Incompatible\n");
- PRINT_WARN("Tape length exceeds IBM enhanced capacity "
- "cartdridge length or a medium\n");
- PRINT_WARN("with EC-CST identification mark has been mounted "
- "in a device that writes\n");
- PRINT_WARN("3480 or 3480 XF format.\n");
+ dev_warn (&device->cdev->dev, "The tape unit does not support "
+ "the tape length\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x5b:
/* Format 3480 XF incompatible */
if (sense[1] & SENSE_BEGINNING_OF_TAPE)
/* The tape will get overwritten. */
return tape_34xx_erp_retry(request);
- PRINT_WARN("Format 3480 XF Incompatible\n");
- PRINT_WARN("Medium has been created in 3480 format. "
- "To change the format writes\n");
- PRINT_WARN("must be issued at BOT.\n");
+ dev_warn (&device->cdev->dev, "The tape unit does not support"
+ " format 3480 XF\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x5c:
/* Format 3480-2 XF incompatible */
- PRINT_WARN("Format 3480-2 XF Incompatible\n");
- PRINT_WARN("Device can only read 3480 or 3480 XF format.\n");
+ dev_warn (&device->cdev->dev, "The tape unit does not support tape "
+ "format 3480-2 XF\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x5d:
/* Tape length violation. */
- PRINT_WARN("Tape Length Violation\n");
- PRINT_WARN("The mounted tape exceeds IBM Enhanced Capacity "
- "Cartdridge System Tape length.\n");
- PRINT_WARN("This may cause damage to the drive or tape when "
- "processing to the EOV\n");
+ dev_warn (&device->cdev->dev, "The tape unit does not support"
+ " the current tape length\n");
return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
case 0x5e:
/* Compaction algorithm incompatible. */
- PRINT_WARN("Compaction Algorithm Incompatible\n");
- PRINT_WARN("The volume is recorded using an incompatible "
- "compaction algorithm,\n");
- PRINT_WARN("which is not supported by the device.\n");
+ dev_warn (&device->cdev->dev, "The tape unit does not support"
+ " the compaction algorithm\n");
return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
/* The following erpas should have been covered earlier. */
@@ -848,7 +830,6 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
(irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
(request->op == TO_WRI)) {
/* Write at end of volume */
- PRINT_INFO("End of volume\n"); /* XXX */
return tape_34xx_erp_failed(request, -ENOSPC);
}
@@ -869,9 +850,7 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
}
DBF_EVENT(6, "xunknownirq\n");
- PRINT_ERR("Unexpected interrupt.\n");
- PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
- tape_dump_sense(device, request, irb);
+ tape_dump_sense_dbf(device, request, irb);
return TAPE_IO_STOP;
}
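
As a rough illustration of the conversion in the hunks above, which replace the console PRINT_* macros with device-attributed messages, here is a minimal sketch of the resulting pattern: KMSG_COMPONENT is defined before the driver includes, and dev_warn() is issued against the tape unit's ccw_device. The helper name and message text are hypothetical and not part of the driver.

#define KMSG_COMPONENT "tape"

#include <linux/device.h>
#include "tape.h"	/* driver-local header providing struct tape_device */

/* Hypothetical helper, shown only to illustrate the dev_warn() pattern. */
static void example_report_overrun(struct tape_device *device)
{
	/* the message is attributed to the owning ccw_device */
	dev_warn(&device->cdev->dev,
		 "A data overrun occurred between the control unit "
		 "and tape unit\n");
}
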
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 71605a179d65..fc1d91294143 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -8,12 +8,15 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <asm/ebcdic.h>
#define TAPE_DBF_AREA tape_3590_dbf
+#define BUFSIZE 512	/* size of buffers for dynamically generated messages */
#include "tape.h"
#include "tape_std.h"
@@ -36,7 +39,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
* - Read Alternate: implemented
*******************************************************************/
-#define PRINTK_HEADER "TAPE_3590: "
+#define KMSG_COMPONENT "tape"
static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
[0x00] = "",
@@ -661,8 +664,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
ccw++;
dst += TAPEBLOCK_HSEC_SIZE;
}
- if (off > bv->bv_len)
- BUG();
+ BUG_ON(off > bv->bv_len);
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");
@@ -726,7 +728,7 @@ static void tape_3590_med_state_set(struct tape_device *device,
}
c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
if (sense->flags & MSENSE_CRYPT_MASK) {
- PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
+ DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
} else {
DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
@@ -847,8 +849,7 @@ tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
tape_3590_schedule_work(device, TO_READ_ATTMSG);
} else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
- PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
- tape_dump_sense(device, NULL, irb);
+ tape_dump_sense_dbf(device, NULL, irb);
}
/* check medium state */
tape_3590_schedule_work(device, TO_MSEN);
@@ -876,8 +877,6 @@ tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
case SENSE_BRA_DRE:
return tape_3590_erp_failed(device, request, irb, rc);
default:
- PRINT_ERR("Unknown BRA %x - This should not happen!\n",
- sense->bra);
BUG();
return TAPE_IO_STOP;
}
@@ -910,7 +909,8 @@ tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
* should proceed with the new tape... this
* should probably be done in user space!
*/
- PRINT_WARN("(%s): Swap Tape Device!\n", dev_name(&device->cdev->dev));
+ dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
+ "different tape unit\n");
return tape_3590_erp_basic(device, request, irb, -EIO);
}
@@ -985,8 +985,6 @@ tape_3590_erp_read_opposite(struct tape_device *device,
return tape_3590_erp_failed(device, request, irb, -EIO);
break;
default:
- PRINT_WARN("read_opposite_recovery_called_with_op: %s\n",
- tape_op_verbose[request->op]);
return tape_3590_erp_failed(device, request, irb, -EIO);
}
}
@@ -998,50 +996,61 @@ static void
tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
+ char *exception, *service;
+
+ exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+ service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+ if (!exception || !service)
+ goto out_nomem;
sense = (struct tape_3590_sense *) irb->ecw;
/* Exception Message */
switch (sense->fmt.f70.emc) {
case 0x02:
- PRINT_WARN("(%s): Data degraded\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "Data degraded");
break;
case 0x03:
- PRINT_WARN("(%s): Data degraded in partion %i\n",
- dev_name(&device->cdev->dev), sense->fmt.f70.mp);
+		snprintf(exception, BUFSIZE, "Data degraded in partition %i",
+ sense->fmt.f70.mp);
break;
case 0x04:
- PRINT_WARN("(%s): Medium degraded\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "Medium degraded");
break;
case 0x05:
- PRINT_WARN("(%s): Medium degraded in partition %i\n",
- dev_name(&device->cdev->dev), sense->fmt.f70.mp);
+ snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
+ sense->fmt.f70.mp);
break;
case 0x06:
- PRINT_WARN("(%s): Block 0 Error\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "Block 0 Error");
break;
case 0x07:
- PRINT_WARN("(%s): Medium Exception 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f70.md);
+ snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
+ sense->fmt.f70.md);
break;
default:
- PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f70.emc);
+ snprintf(exception, BUFSIZE, "0x%02x",
+ sense->fmt.f70.emc);
break;
}
/* Service Message */
switch (sense->fmt.f70.smc) {
case 0x02:
- PRINT_WARN("(%s): Reference Media maintenance procedure %i\n",
- dev_name(&device->cdev->dev), sense->fmt.f70.md);
+ snprintf(service, BUFSIZE, "Reference Media maintenance "
+ "procedure %i", sense->fmt.f70.md);
break;
default:
- PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f70.smc);
+ snprintf(service, BUFSIZE, "0x%02x",
+ sense->fmt.f70.smc);
break;
}
+
+ dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
+ "service %s\n", exception, service);
+
+out_nomem:
+ kfree(exception);
+ kfree(service);
}
/*
@@ -1051,108 +1060,108 @@ static void
tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
+ char *exception, *service;
+
+ exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+ service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+ if (!exception || !service)
+ goto out_nomem;
sense = (struct tape_3590_sense *) irb->ecw;
/* Exception Message */
switch (sense->fmt.f71.emc) {
case 0x01:
- PRINT_WARN("(%s): Effect of failure is unknown\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "Effect of failure is unknown");
break;
case 0x02:
- PRINT_WARN("(%s): CU Exception - no performance impact\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "CU Exception - no performance "
+ "impact");
break;
case 0x03:
- PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "CU Exception on channel "
+ "interface 0x%02x", sense->fmt.f71.md[0]);
break;
case 0x04:
- PRINT_WARN("(%s): CU Exception on device path 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "CU Exception on device path "
+ "0x%02x", sense->fmt.f71.md[0]);
break;
case 0x05:
- PRINT_WARN("(%s): CU Exception on library path 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "CU Exception on library path "
+ "0x%02x", sense->fmt.f71.md[0]);
break;
case 0x06:
- PRINT_WARN("(%s): CU Exception on node 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
+ sense->fmt.f71.md[0]);
break;
case 0x07:
- PRINT_WARN("(%s): CU Exception on partition 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "CU Exception on partition "
+ "0x%02x", sense->fmt.f71.md[0]);
break;
default:
- PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.emc);
+ snprintf(exception, BUFSIZE, "0x%02x",
+ sense->fmt.f71.emc);
}
/* Service Message */
switch (sense->fmt.f71.smc) {
case 0x01:
- PRINT_WARN("(%s): Repair impact is unknown\n",
- dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Repair impact is unknown");
break;
case 0x02:
- PRINT_WARN("(%s): Repair will not impact cu performance\n",
- dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Repair will not impact cu "
+ "performance");
break;
case 0x03:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable node "
- "0x%x on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable node "
+ "0x%x on CU", sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable nodes "
- "(0x%x-0x%x) on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
+ sense->fmt.f71.md[2]);
break;
case 0x04:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable cannel path "
- "0x%x on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "channel path 0x%x on CU",
+ sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable cannel paths "
- "(0x%x-0x%x) on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+			snprintf(service, BUFSIZE, "Repair will disable channel"
+ " paths (0x%x-0x%x) on CU",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x05:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable device path "
- "0x%x on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable device"
+ " path 0x%x on CU", sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable device paths "
- "(0x%x-0x%x) on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable device"
+ " paths (0x%x-0x%x) on CU",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x06:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable library path "
- "0x%x on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "library path 0x%x on CU",
+ sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable library paths "
- "(0x%x-0x%x) on CU\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "library paths (0x%x-0x%x) on CU",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x07:
- PRINT_WARN("(%s): Repair will disable access to CU\n",
- dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Repair will disable access to CU");
break;
default:
- PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.smc);
+ snprintf(service, BUFSIZE, "0x%02x",
+ sense->fmt.f71.smc);
}
+
+ dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
+ " %s, service %s\n", exception, service);
+out_nomem:
+ kfree(exception);
+ kfree(service);
}
/*
@@ -1162,111 +1171,109 @@ static void
tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
+ char *exception, *service;
+
+ exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+ service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+ if (!exception || !service)
+ goto out_nomem;
sense = (struct tape_3590_sense *) irb->ecw;
/* Exception Message */
switch (sense->fmt.f71.emc) {
case 0x01:
- PRINT_WARN("(%s): Effect of failure is unknown\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "Effect of failure is unknown");
break;
case 0x02:
- PRINT_WARN("(%s): DV Exception - no performance impact\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "DV Exception - no performance"
+ " impact");
break;
case 0x03:
- PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "DV Exception on channel "
+ "interface 0x%02x", sense->fmt.f71.md[0]);
break;
case 0x04:
- PRINT_WARN("(%s): DV Exception on loader 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
+ sense->fmt.f71.md[0]);
break;
case 0x05:
- PRINT_WARN("(%s): DV Exception on message display 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
+ snprintf(exception, BUFSIZE, "DV Exception on message display"
+ " 0x%02x", sense->fmt.f71.md[0]);
break;
case 0x06:
- PRINT_WARN("(%s): DV Exception in tape path\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "DV Exception in tape path");
break;
case 0x07:
- PRINT_WARN("(%s): DV Exception in drive\n",
- dev_name(&device->cdev->dev));
+ snprintf(exception, BUFSIZE, "DV Exception in drive");
break;
default:
- PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.emc);
+ snprintf(exception, BUFSIZE, "0x%02x",
+ sense->fmt.f71.emc);
}
/* Service Message */
switch (sense->fmt.f71.smc) {
case 0x01:
- PRINT_WARN("(%s): Repair impact is unknown\n",
- dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Repair impact is unknown");
break;
case 0x02:
- PRINT_WARN("(%s): Repair will not impact device performance\n",
- dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Repair will not impact device "
+ "performance");
break;
case 0x03:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable channel path "
- "0x%x on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "channel path 0x%x on DV",
+ sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable channel path "
- "(0x%x-0x%x) on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "channel path (0x%x-0x%x) on DV",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x04:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable interface 0x%x "
- "on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "interface 0x%x on DV", sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable interfaces "
- "(0x%x-0x%x) on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "interfaces (0x%x-0x%x) on DV",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x05:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable loader 0x%x "
- "on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable loader"
+ " 0x%x on DV", sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable loader "
- "(0x%x-0x%x) on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable loader"
+ " (0x%x-0x%x) on DV",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x07:
- PRINT_WARN("(%s): Repair will disable access to DV\n",
- dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Repair will disable access to DV");
break;
case 0x08:
if (sense->fmt.f71.mdf == 0)
- PRINT_WARN("(%s): Repair will disable message "
- "display 0x%x on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "message display 0x%x on DV",
+ sense->fmt.f71.md[1]);
else
- PRINT_WARN("(%s): Repair will disable message "
- "displays (0x%x-0x%x) on DV\n",
- dev_name(&device->cdev->dev),
- sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+ snprintf(service, BUFSIZE, "Repair will disable "
+ "message displays (0x%x-0x%x) on DV",
+ sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x09:
- PRINT_WARN("(%s): Clean DV\n", dev_name(&device->cdev->dev));
+ snprintf(service, BUFSIZE, "Clean DV");
break;
default:
- PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.smc);
+ snprintf(service, BUFSIZE, "0x%02x",
+ sense->fmt.f71.smc);
}
+
+ dev_warn (&device->cdev->dev, "Device subsystem information: exception"
+ " %s, service %s\n", exception, service);
+out_nomem:
+ kfree(exception);
+ kfree(service);
}
/*
@@ -1282,46 +1289,44 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
return;
if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
if (tape_3590_msg[sense->mc] != NULL)
- PRINT_WARN("(%s): %s\n", dev_name(&device->cdev->dev),
- tape_3590_msg[sense->mc]);
- else {
- PRINT_WARN("(%s): Message Code 0x%x\n",
- dev_name(&device->cdev->dev), sense->mc);
- }
+ dev_warn (&device->cdev->dev, "The tape unit has "
+ "issued sense message %s\n",
+ tape_3590_msg[sense->mc]);
+ else
+ dev_warn (&device->cdev->dev, "The tape unit has "
+ "issued an unknown sense message code 0x%x\n",
+ sense->mc);
return;
}
if (sense->mc == 0xf0) {
/* Standard Media Information Message */
- PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, "
- "RC=%02x-%04x-%02x\n", dev_name(&device->cdev->dev),
- sense->fmt.f70.sev, sense->mc,
- sense->fmt.f70.emc, sense->fmt.f70.smc,
- sense->fmt.f70.refcode, sense->fmt.f70.mid,
- sense->fmt.f70.fid);
+ dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
+ "RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
+ sense->fmt.f70.emc, sense->fmt.f70.smc,
+ sense->fmt.f70.refcode, sense->fmt.f70.mid,
+ sense->fmt.f70.fid);
tape_3590_print_mim_msg_f0(device, irb);
return;
}
if (sense->mc == 0xf1) {
/* Standard I/O Subsystem Service Information Message */
- PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, "
- "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.sev,
- device->cdev->id.dev_model,
- sense->mc, sense->fmt.f71.emc,
- sense->fmt.f71.smc, sense->fmt.f71.refcode1,
- sense->fmt.f71.refcode2, sense->fmt.f71.refcode3);
+ dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
+ " MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+ sense->fmt.f71.sev, device->cdev->id.dev_model,
+ sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+ sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+ sense->fmt.f71.refcode3);
tape_3590_print_io_sim_msg_f1(device, irb);
return;
}
if (sense->mc == 0xf2) {
/* Standard Device Service Information Message */
- PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, "
- "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
- dev_name(&device->cdev->dev), sense->fmt.f71.sev,
- device->cdev->id.dev_model,
- sense->mc, sense->fmt.f71.emc,
- sense->fmt.f71.smc, sense->fmt.f71.refcode1,
- sense->fmt.f71.refcode2, sense->fmt.f71.refcode3);
+ dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
+ ", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+ sense->fmt.f71.sev, device->cdev->id.dev_model,
+ sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+ sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+ sense->fmt.f71.refcode3);
tape_3590_print_dev_sim_msg_f2(device, irb);
return;
}
@@ -1329,8 +1334,8 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
/* Standard Library Service Information Message */
return;
}
- PRINT_WARN("(%s): Device Message(%x)\n",
- dev_name(&device->cdev->dev), sense->mc);
+ dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
+ "sense message code %x\n", sense->mc);
}
static int tape_3590_crypt_error(struct tape_device *device,
@@ -1355,9 +1360,8 @@ static int tape_3590_crypt_error(struct tape_device *device,
/* No connection to EKM */
return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
- PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id);
- PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc,
- drv_rc, ekm_rc1, ekm_rc2);
+ dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
+ "encryption key from EKM\n");
return tape_3590_erp_basic(device, request, irb, -ENOKEY);
}
@@ -1443,8 +1447,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
* print additional msg since default msg
* "device intervention" is not very meaningfull
*/
- PRINT_WARN("(%s): Tape operation when medium not loaded\n",
- dev_name(&device->cdev->dev));
tape_med_state_set(device, MS_UNLOADED);
tape_3590_schedule_work(device, TO_CRYPT_OFF);
return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
@@ -1490,19 +1492,13 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
case 0x6020:
- PRINT_WARN("(%s): Cartridge of wrong type ?\n",
- dev_name(&device->cdev->dev));
return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
case 0x8011:
- PRINT_WARN("(%s): Another host has reserved the tape device\n",
- dev_name(&device->cdev->dev));
return tape_3590_erp_basic(device, request, irb, -EPERM);
case 0x8013:
- PRINT_WARN("(%s): Another host has privileged access to the "
- "tape device\n", dev_name(&device->cdev->dev));
- PRINT_WARN("(%s): To solve the problem unload the current "
- "cartridge!\n", dev_name(&device->cdev->dev));
+ dev_warn (&device->cdev->dev, "A different host has privileged"
+ " access to the tape unit\n");
return tape_3590_erp_basic(device, request, irb, -EPERM);
default:
return tape_3590_erp_basic(device, request, irb, -EIO);
@@ -1552,9 +1548,7 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
}
DBF_EVENT(6, "xunknownirq\n");
- PRINT_ERR("Unexpected interrupt.\n");
- PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
- tape_dump_sense(device, request, irb);
+ tape_dump_sense_dbf(device, request, irb);
return TAPE_IO_STOP;
}
@@ -1609,7 +1603,6 @@ tape_3590_setup_device(struct tape_device *device)
if (rc)
goto fail_rdc_data;
if (rdc_data->data[31] == 0x13) {
- PRINT_INFO("Device has crypto support\n");
data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
tape_3592_disable_crypt(device);
} else {
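
The three print helpers rewritten above share one pattern: format the exception and service strings into temporary buffers, then emit them in a single dev_warn() line. A compressed, hypothetical sketch of that pattern follows; BUFSIZE and struct tape_device are taken from the hunks above, and GFP_ATOMIC is used on the assumption that the sense decoding helpers can run from interrupt context.

#include <linux/slab.h>
#include <linux/device.h>
#include "tape.h"	/* assumed driver-local header */

static void example_print_sim(struct tape_device *device, unsigned int emc)
{
	char *exception;

	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
	if (!exception)
		return;
	snprintf(exception, BUFSIZE, "CU Exception 0x%02x", emc);
	dev_warn(&device->cdev->dev,
		 "I/O subsystem information: exception %s\n", exception);
	kfree(exception);
}
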
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ae18baf59f06..f32e89e7c4f2 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -10,6 +10,8 @@
* Stefan Bader <shbader@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
@@ -23,8 +25,6 @@
#include "tape.h"
-#define PRINTK_HEADER "TAPE_BLOCK: "
-
#define TAPEBLOCK_MAX_SEC 100
#define TAPEBLOCK_MIN_REQUEUE 3
@@ -279,8 +279,6 @@ tapeblock_cleanup_device(struct tape_device *device)
tape_put_device(device);
if (!device->blk_data.disk) {
- PRINT_ERR("(%s): No gendisk to clean up!\n",
- dev_name(&device->cdev->dev));
goto cleanup_queue;
}
@@ -314,7 +312,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (!device->blk_data.medium_changed)
return 0;
- PRINT_INFO("Detecting media size...\n");
+ dev_info(&device->cdev->dev, "Determining the size of the recorded "
+ "area...\n");
rc = tape_mtop(device, MTFSFM, 1);
if (rc)
return rc;
@@ -341,7 +340,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
device->bof = rc;
nr_of_blks -= rc;
- PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
+ dev_info(&device->cdev->dev, "The size of the recorded area is %i "
+ "blocks\n", nr_of_blks);
set_capacity(device->blk_data.disk,
nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
@@ -376,8 +376,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
- PRINT_ERR("TBLOCK: Refusing to open tape with missing"
- " end of file marks.\n");
+ dev_warn(&device->cdev->dev, "Opening the tape failed because"
+ " of missing end-of-file marks\n");
rc = -EPERM;
goto put_device;
}
@@ -452,7 +452,6 @@ tapeblock_ioctl(
rc = -EINVAL;
break;
default:
- PRINT_WARN("invalid ioctl 0x%x\n", command);
rc = -EINVAL;
}
@@ -474,7 +473,6 @@ tapeblock_init(void)
if (tapeblock_major == 0)
tapeblock_major = rc;
- PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
return 0;
}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index be0ce2215c8d..31566c55adfe 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -24,8 +24,6 @@
#include "tape_std.h"
#include "tape_class.h"
-#define PRINTK_HEADER "TAPE_CHAR: "
-
#define TAPECHAR_MAJOR 0 /* get dynamic major */
/*
@@ -102,8 +100,6 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
if (block_size > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
block_size, MAX_BLOCKSIZE);
- PRINT_ERR("Invalid blocksize (%zd> %d)\n",
- block_size, MAX_BLOCKSIZE);
return -EINVAL;
}
@@ -485,7 +481,6 @@ tapechar_init (void)
return -1;
tapechar_major = MAJOR(dev);
- PRINT_INFO("tape gets major %d for character devices\n", MAJOR(dev));
return 0;
}
@@ -496,7 +491,5 @@ tapechar_init (void)
void
tapechar_exit(void)
{
- PRINT_INFO("tape releases major %d for character devices\n",
- tapechar_major);
unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f9bb51fa7f5b..08c09d3503cf 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -11,6 +11,7 @@
* Stefan Bader <shbader@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
@@ -25,7 +26,6 @@
#include "tape.h"
#include "tape_std.h"
-#define PRINTK_HEADER "TAPE_CORE: "
#define LONG_BUSY_TIMEOUT 180 /* seconds */
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
@@ -214,13 +214,13 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
- PRINT_INFO("(%s): Tape is unloaded\n",
- dev_name(&device->cdev->dev));
+ dev_info(&device->cdev->dev, "The tape cartridge has been "
+ "successfully unloaded\n");
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
- PRINT_INFO("(%s): Tape has been mounted\n",
- dev_name(&device->cdev->dev));
+ dev_info(&device->cdev->dev, "A tape cartridge has been "
+ "mounted\n");
break;
default:
// print nothing
@@ -333,7 +333,6 @@ tape_generic_online(struct tape_device *device,
/* Let the discipline have a go at the device. */
device->discipline = discipline;
if (!try_module_get(discipline->owner)) {
- PRINT_ERR("Cannot get module. Module gone.\n");
return -EINVAL;
}
@@ -391,7 +390,6 @@ int
tape_generic_offline(struct tape_device *device)
{
if (!device) {
- PRINT_ERR("tape_generic_offline: no such device\n");
return -ENODEV;
}
@@ -413,9 +411,6 @@ tape_generic_offline(struct tape_device *device)
DBF_EVENT(3, "(%08x): Set offline failed "
"- drive in use.\n",
device->cdev_id);
- PRINT_WARN("(%s): Set offline failed "
- "- drive in use.\n",
- dev_name(&device->cdev->dev));
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return -EBUSY;
}
@@ -435,14 +430,11 @@ tape_alloc_device(void)
device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
if (device == NULL) {
DBF_EXCEPTION(2, "ti:no mem\n");
- PRINT_INFO ("can't allocate memory for "
- "tape info structure\n");
return ERR_PTR(-ENOMEM);
}
device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
if (device->modeset_byte == NULL) {
DBF_EXCEPTION(2, "ti:no mem\n");
- PRINT_INFO("can't allocate memory for modeset byte\n");
kfree(device);
return ERR_PTR(-ENOMEM);
}
@@ -490,7 +482,6 @@ tape_put_device(struct tape_device *device)
} else {
if (remain < 0) {
DBF_EVENT(4, "put device without reference\n");
- PRINT_ERR("put device without reference\n");
} else {
DBF_EVENT(4, "tape_free_device(%p)\n", device);
kfree(device->modeset_byte);
@@ -538,8 +529,6 @@ tape_generic_probe(struct ccw_device *cdev)
ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
if (ret) {
tape_put_device(device);
- PRINT_ERR("probe failed for tape device %s\n",
- dev_name(&cdev->dev));
return ret;
}
cdev->dev.driver_data = device;
@@ -547,7 +536,6 @@ tape_generic_probe(struct ccw_device *cdev)
device->cdev = cdev;
ccw_device_get_id(cdev, &dev_id);
device->cdev_id = devid_to_int(&dev_id);
- PRINT_INFO("tape device %s found\n", dev_name(&cdev->dev));
return ret;
}
@@ -584,7 +572,6 @@ tape_generic_remove(struct ccw_device *cdev)
device = cdev->dev.driver_data;
if (!device) {
- PRINT_ERR("No device pointer in tape_generic_remove!\n");
return;
}
DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
@@ -615,10 +602,8 @@ tape_generic_remove(struct ccw_device *cdev)
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
- PRINT_WARN("(%s): Drive in use vanished - "
- "expect trouble!\n",
- dev_name(&device->cdev->dev));
- PRINT_WARN("State was %i\n", device->tape_state);
+ dev_warn(&device->cdev->dev, "A tape unit was detached"
+ " while in use\n");
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
@@ -639,8 +624,7 @@ tape_alloc_request(int cplength, int datasize)
{
struct tape_request *request;
- if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
- BUG();
+ BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
@@ -797,8 +781,7 @@ static void tape_long_busy_timeout(unsigned long data)
device = (struct tape_device *) data;
spin_lock_irq(get_ccwdev_lock(device->cdev));
request = list_entry(device->req_queue.next, struct tape_request, list);
- if (request->status != TAPE_REQUEST_LONG_BUSY)
- BUG();
+ BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
__tape_start_next_request(device);
device->lb_timeout.data = (unsigned long) tape_put_device(device);
@@ -830,30 +813,6 @@ __tape_end_request(
}
/*
- * Write sense data to console/dbf
- */
-void
-tape_dump_sense(struct tape_device* device, struct tape_request *request,
- struct irb *irb)
-{
- unsigned int *sptr;
-
- PRINT_INFO("-------------------------------------------------\n");
- PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
- irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
- PRINT_INFO("DEVICE: %s\n", dev_name(&device->cdev->dev));
- if (request != NULL)
- PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
-
- sptr = (unsigned int *) irb->ecw;
- PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
- sptr[0], sptr[1], sptr[2], sptr[3]);
- PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
- sptr[4], sptr[5], sptr[6], sptr[7]);
- PRINT_INFO("--------------------------------------------------\n");
-}
-
-/*
* Write sense data to dbf
*/
void
@@ -1051,8 +1010,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
device = (struct tape_device *) cdev->dev.driver_data;
if (device == NULL) {
- PRINT_ERR("could not get device structure for %s "
- "in interrupt\n", dev_name(&cdev->dev));
return;
}
request = (struct tape_request *) intparm;
@@ -1064,13 +1021,13 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* FIXME: What to do with the request? */
switch (PTR_ERR(irb)) {
case -ETIMEDOUT:
- PRINT_WARN("(%s): Request timed out\n",
+ DBF_LH(1, "(%s): Request timed out\n",
dev_name(&cdev->dev));
case -EIO:
__tape_end_request(device, request, -EIO);
break;
default:
- PRINT_ERR("(%s): Unexpected i/o error %li\n",
+ DBF_LH(1, "(%s): Unexpected i/o error %li\n",
dev_name(&cdev->dev),
PTR_ERR(irb));
}
@@ -1182,8 +1139,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
default:
if (rc > 0) {
DBF_EVENT(6, "xunknownrc\n");
- PRINT_ERR("Invalid return code from discipline "
- "interrupt function.\n");
__tape_end_request(device, request, -EIO);
} else {
__tape_end_request(device, request, rc);
@@ -1323,7 +1278,6 @@ EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
-EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 8a376af926a7..202f42132939 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -20,8 +20,6 @@
#include "tape.h"
-#define PRINTK_HEADER "TAPE_PROC: "
-
static const char *tape_med_st_verbose[MS_SIZE] =
{
[MS_UNKNOWN] = "UNKNOWN ",
@@ -128,7 +126,6 @@ tape_proc_init(void)
proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
&tape_proc_ops);
if (tape_proc_devices == NULL) {
- PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
return;
}
}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 5bd573d144d6..1a9420ba518d 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -26,8 +26,6 @@
#include "tape.h"
#include "tape_std.h"
-#define PRINTK_HEADER "TAPE_STD: "
-
/*
* tape_std_assign
*/
@@ -39,16 +37,15 @@ tape_std_assign_timeout(unsigned long data)
int rc;
request = (struct tape_request *) data;
- if ((device = request->device) == NULL)
- BUG();
+ device = request->device;
+ BUG_ON(!device);
DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
device->cdev_id);
rc = tape_cancel_io(device, request);
if(rc)
- PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n",
+ DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n",
dev_name(&device->cdev->dev), rc);
-
}
int
@@ -82,8 +79,6 @@ tape_std_assign(struct tape_device *device)
del_timer(&timeout);
if (rc != 0) {
- PRINT_WARN("%s: assign failed - device might be busy\n",
- dev_name(&device->cdev->dev));
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
device->cdev_id);
} else {
@@ -105,8 +100,6 @@ tape_std_unassign (struct tape_device *device)
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(3, "(%08x): Can't unassign device\n",
device->cdev_id);
- PRINT_WARN("(%s): Can't unassign device - device gone\n",
- dev_name(&device->cdev->dev));
return -EIO;
}
@@ -120,8 +113,6 @@ tape_std_unassign (struct tape_device *device)
if ((rc = tape_do_io(device, request)) != 0) {
DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
- PRINT_WARN("%s: Unassign failed\n",
- dev_name(&device->cdev->dev));
} else {
DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
}
@@ -242,8 +233,6 @@ tape_std_mtsetblk(struct tape_device *device, int count)
if (count > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
count, MAX_BLOCKSIZE);
- PRINT_ERR("Invalid block size (%d > %d) given.\n",
- count, MAX_BLOCKSIZE);
return -EINVAL;
}
@@ -633,14 +622,6 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
if (mt_count < 0 || mt_count > 1) {
DBF_EXCEPTION(6, "xcom parm\n");
- if (*device->modeset_byte & 0x08)
- PRINT_INFO("(%s) Compression is currently on\n",
- dev_name(&device->cdev->dev));
- else
- PRINT_INFO("(%s) Compression is currently off\n",
- dev_name(&device->cdev->dev));
- PRINT_INFO("Use 1 to switch compression on, 0 to "
- "switch it off\n");
return -EINVAL;
}
request = tape_alloc_request(2, 0);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index eefc6611412e..1bbae433fbd8 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -5,7 +5,7 @@
*
* For more information please refer to Documentation/s390/zfcpdump.txt
*
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003,2008
* Author(s): Michael Holzheu
*/
@@ -24,6 +24,7 @@
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
+#include <asm/checksum.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
@@ -48,12 +49,19 @@ struct sys_info {
union save_area lc_mask;
};
+struct ipib_info {
+ unsigned long ipib;
+ u32 checksum;
+} __attribute__((packed));
+
static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
+static struct dentry *zcore_reipl_file;
+static struct ipl_parameter_block *ipl_block;
/*
* Copy memory from HSA to kernel or user memory (not reentrant):
@@ -527,6 +535,33 @@ static const struct file_operations zcore_memmap_fops = {
.release = zcore_memmap_release,
};
+static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (ipl_block) {
+ diag308(DIAG308_SET, ipl_block);
+ diag308(DIAG308_IPL, NULL);
+ }
+ return count;
+}
+
+static int zcore_reipl_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int zcore_reipl_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations zcore_reipl_fops = {
+ .owner = THIS_MODULE,
+ .write = zcore_reipl_write,
+ .open = zcore_reipl_open,
+ .release = zcore_reipl_release,
+};
+
static void __init set_s390_lc_mask(union save_area *map)
{
@@ -645,6 +680,40 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
return 0;
}
+/*
+ * Provide IPL parameter information block from either HSA or memory
+ * for future reipl
+ */
+static int __init zcore_reipl_init(void)
+{
+ struct ipib_info ipib_info;
+ int rc;
+
+ rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
+ if (rc)
+ return rc;
+ if (ipib_info.ipib == 0)
+ return 0;
+ ipl_block = (void *) __get_free_page(GFP_KERNEL);
+ if (!ipl_block)
+ return -ENOMEM;
+ if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
+ rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+ else
+ rc = memcpy_real(ipl_block, ipib_info.ipib, PAGE_SIZE);
+ if (rc) {
+ free_page((unsigned long) ipl_block);
+ return rc;
+ }
+ if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ ipib_info.checksum) {
+ TRACE("Checksum does not match\n");
+ free_page((unsigned long) ipl_block);
+ ipl_block = NULL;
+ }
+ return 0;
+}
+
static int __init zcore_init(void)
{
unsigned char arch;
@@ -690,6 +759,10 @@ static int __init zcore_init(void)
if (rc)
goto fail;
+ rc = zcore_reipl_init();
+ if (rc)
+ goto fail;
+
zcore_dir = debugfs_create_dir("zcore" , NULL);
if (!zcore_dir) {
rc = -ENOMEM;
@@ -707,9 +780,17 @@ static int __init zcore_init(void)
rc = -ENOMEM;
goto fail_file;
}
+ zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
+ NULL, &zcore_reipl_fops);
+ if (!zcore_reipl_file) {
+ rc = -ENOMEM;
+ goto fail_memmap_file;
+ }
hsa_available = 1;
return 0;
+fail_memmap_file:
+ debugfs_remove(zcore_memmap_file);
fail_file:
debugfs_remove(zcore_file);
fail_dir:
@@ -723,10 +804,15 @@ static void __exit zcore_exit(void)
{
debug_unregister(zcore_dbf);
sclp_sdias_exit();
+ free_page((unsigned long) ipl_block);
+ debugfs_remove(zcore_reipl_file);
+ debugfs_remove(zcore_memmap_file);
+ debugfs_remove(zcore_file);
+ debugfs_remove(zcore_dir);
diag308(DIAG308_REL_HSA, NULL);
}
-MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
+MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");
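
The debugfs entry added above can be driven from user space: zcore_reipl_write() ignores the written data and triggers diag308() as long as a valid IPL parameter block was found at dump time. A hypothetical user-space trigger is sketched below; it assumes debugfs is mounted at /sys/kernel/debug and must be run as root.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd;

	fd = open("/sys/kernel/debug/zcore/reipl", O_WRONLY);
	if (fd < 0)
		return 1;
	/* the content is ignored; any successful write re-IPLs the system */
	if (write(fd, "1", 1) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
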
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index bd79bd165396..adb3dd301528 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
- fcx.o itcw.o
+ fcx.o itcw.o crw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index fe6cea15bbaf..65d2e769dfa1 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -34,8 +34,8 @@ struct airq_t {
void *drv_data;
};
-static union indicator_t indicators[MAX_ISC];
-static struct airq_t *airqs[MAX_ISC][NR_AIRQS];
+static union indicator_t indicators[MAX_ISC+1];
+static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
static int register_airq(struct airq_t *airq, u8 isc)
{
@@ -133,6 +133,8 @@ void do_adapter_IO(u8 isc)
while (word) {
if (word & INDICATOR_MASK) {
airq = airqs[isc][i];
+ /* Make sure gcc reads from airqs only once. */
+ barrier();
if (likely(airq))
airq->handler(&indicators[isc].byte[i],
airq->drv_data);
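
The barrier() added above forces a single load of airqs[isc][i]; without it the compiler may re-read the slot after the NULL check, which could race with a concurrent unregistration. A hypothetical fragment of the safe shape, assuming the airq_t layout used in this file (a handler callback plus drv_data):

#include <linux/compiler.h>
#include <linux/types.h>

static void example_deliver(struct airq_t *slot[], int i, u8 *indicator)
{
	struct airq_t *airq;

	airq = slot[i];
	barrier();	/* use only this one snapshot of slot[i] */
	if (likely(airq))
		airq->handler(indicator, airq->drv_data);
}
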
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index fe00be3675cd..6565f027791e 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -336,8 +336,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
char *buf;
- size_t i;
- ssize_t rc, ret;
+ ssize_t rc, ret, i;
if (*offset)
return -EINVAL;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 918e6fce2573..22ce765d537e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -104,8 +104,9 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const
rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
out:
if (rc) {
- /* Release onoff "lock" when ungrouping failed. */
- atomic_set(&gdev->onoff, 0);
+ if (rc != -EAGAIN)
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
@@ -314,16 +315,32 @@ error:
}
EXPORT_SYMBOL(ccwgroup_create_from_string);
-static int __init
-init_ccwgroup (void)
+static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+ void *data);
+
+static struct notifier_block ccwgroup_nb = {
+ .notifier_call = ccwgroup_notifier
+};
+
+static int __init init_ccwgroup(void)
{
- return bus_register (&ccwgroup_bus_type);
+ int ret;
+
+ ret = bus_register(&ccwgroup_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ if (ret)
+ bus_unregister(&ccwgroup_bus_type);
+
+ return ret;
}
-static void __exit
-cleanup_ccwgroup (void)
+static void __exit cleanup_ccwgroup(void)
{
- bus_unregister (&ccwgroup_bus_type);
+ bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ bus_unregister(&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
@@ -391,27 +408,28 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
unsigned long value;
int ret;
- gdev = to_ccwgroupdev(dev);
if (!dev->driver)
- return count;
+ return -ENODEV;
+
+ gdev = to_ccwgroupdev(dev);
+ gdrv = to_ccwgroupdrv(dev->driver);
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
if (!try_module_get(gdrv->owner))
return -EINVAL;
ret = strict_strtoul(buf, 0, &value);
if (ret)
goto out;
- ret = count;
+
if (value == 1)
- ccwgroup_set_online(gdev);
+ ret = ccwgroup_set_online(gdev);
else if (value == 0)
- ccwgroup_set_offline(gdev);
+ ret = ccwgroup_set_offline(gdev);
else
ret = -EINVAL;
out:
module_put(gdrv->owner);
- return ret;
+ return (ret == 0) ? count : ret;
}
static ssize_t
@@ -453,13 +471,18 @@ ccwgroup_remove (struct device *dev)
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
+ device_remove_file(dev, &dev_attr_online);
+ device_remove_file(dev, &dev_attr_ungroup);
+
+ if (!dev->driver)
+ return 0;
+
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- device_remove_file(dev, &dev_attr_online);
-
- if (gdrv && gdrv->remove)
+ if (gdrv->remove)
gdrv->remove(gdev);
+
return 0;
}
@@ -468,9 +491,13 @@ static void ccwgroup_shutdown(struct device *dev)
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
+ if (!dev->driver)
+ return;
+
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- if (gdrv && gdrv->shutdown)
+
+ if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
@@ -483,6 +510,19 @@ static struct bus_type ccwgroup_bus_type = {
.shutdown = ccwgroup_shutdown,
};
+
+static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_UNBIND_DRIVER)
+ device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ return NOTIFY_OK;
+}
+
+
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1246f61a5338..3e5f304ad88f 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -17,8 +17,8 @@
#include <linux/errno.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
+#include <asm/crw.h>
-#include "../s390mach.h"
#include "cio.h"
#include "css.h"
#include "ioasm.h"
@@ -706,12 +706,12 @@ static int __init chp_init(void)
struct chp_id chpid;
int ret;
- ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
+ ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
chp_wq = create_singlethread_workqueue("cio_chp");
if (!chp_wq) {
- s390_unregister_crw_handler(CRW_RSC_CPATH);
+ crw_unregister_handler(CRW_RSC_CPATH);
return -ENOMEM;
}
INIT_WORK(&cfg_work, cfg_func);
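
chp_init() above, like chsc_alloc_sei_area() below, now uses the crw_register_handler()/crw_unregister_handler() pair from the new crw.c instead of the old s390mach interfaces. The following sketch mirrors that registration pattern with hypothetical names; the handler prototype is assumed to match crw_handler_t from <asm/crw.h>, and CRW_RSC_CPATH merely stands in for whichever reporting source the caller owns.

#include <linux/init.h>
#include <linux/workqueue.h>
#include <asm/crw.h>

static struct workqueue_struct *example_wq;

/* prototype assumed to match crw_handler_t */
static void example_crw_handler(struct crw *crw0, struct crw *crw1,
				int overflow)
{
	/* evaluate the channel report word(s) for this source */
}

static int __init example_init(void)
{
	int ret;

	ret = crw_register_handler(CRW_RSC_CPATH, example_crw_handler);
	if (ret)
		return ret;
	example_wq = create_singlethread_workqueue("example_crw");
	if (!example_wq) {
		/* undo the registration when later setup fails */
		crw_unregister_handler(CRW_RSC_CPATH);
		return -ENOMEM;
	}
	return 0;
}
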
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index ebab6ea4659b..883f16f96f22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -19,8 +19,8 @@
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
+#include <asm/crw.h>
-#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
@@ -589,6 +589,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
case 0x0102:
case 0x0103:
ret = -EINVAL;
+ break;
default:
ret = chsc_error_from_response(secm_area->response.code);
}
@@ -820,7 +821,7 @@ int __init chsc_alloc_sei_area(void)
"chsc machine checks!\n");
return -ENOMEM;
}
- ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
+ ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
kfree(sei_page);
return ret;
@@ -828,7 +829,7 @@ int __init chsc_alloc_sei_area(void)
void __init chsc_free_sei_area(void)
{
- s390_unregister_crw_handler(CRW_RSC_CSS);
+ crw_unregister_handler(CRW_RSC_CSS);
kfree(sei_page);
}
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 0a2f2edafc03..93eca1731b81 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -84,8 +84,8 @@ static int chsc_subchannel_probe(struct subchannel *sch)
kfree(private);
} else {
sch->private = private;
- if (sch->dev.uevent_suppress) {
- sch->dev.uevent_suppress = 0;
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 659f8a791656..2aebb9823044 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -30,6 +30,8 @@
#include <asm/isc.h>
#include <asm/cpu.h>
#include <asm/fcx.h>
+#include <asm/nmi.h>
+#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
@@ -38,7 +40,6 @@
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"
-#include "../s390mach.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
@@ -471,6 +472,7 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
int cio_disable_subchannel(struct subchannel *sch)
{
char dbf_txt[15];
+ int retry;
int ret;
CIO_TRACE_EVENT (2, "dissch");
@@ -481,16 +483,17 @@ int cio_disable_subchannel(struct subchannel *sch)
if (cio_update_schib(sch))
return -ENODEV;
- if (scsw_actl(&sch->schib.scsw) != 0)
- /*
- * the disable function must not be called while there are
- * requests pending for completion !
- */
- return -EBUSY;
-
sch->config.ena = 0;
- ret = cio_commit_config(sch);
+ for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EBUSY) {
+ struct irb irb;
+ if (tsch(sch->schid, &irb) != 0)
+ break;
+ } else
+ break;
+ }
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
return ret;
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
new file mode 100644
index 000000000000..d157665d0e76
--- /dev/null
+++ b/drivers/s390/cio/crw.c
@@ -0,0 +1,159 @@
+/*
+ * Channel report handling code
+ *
+ * Copyright IBM Corp. 2000,2009
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Cornelia Huck <cornelia.huck@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+#include <asm/crw.h>
+
+static struct semaphore crw_semaphore;
+static DEFINE_MUTEX(crw_handler_mutex);
+static crw_handler_t crw_handlers[NR_RSCS];
+
+/**
+ * crw_register_handler() - register a channel report word handler
+ * @rsc: reporting source code to handle
+ * @handler: handler to be registered
+ *
+ * Returns %0 on success and a negative error value otherwise.
+ */
+int crw_register_handler(int rsc, crw_handler_t handler)
+{
+ int rc = 0;
+
+ if ((rsc < 0) || (rsc >= NR_RSCS))
+ return -EINVAL;
+ mutex_lock(&crw_handler_mutex);
+ if (crw_handlers[rsc])
+ rc = -EBUSY;
+ else
+ crw_handlers[rsc] = handler;
+ mutex_unlock(&crw_handler_mutex);
+ return rc;
+}
+
+/**
+ * crw_unregister_handler() - unregister a channel report word handler
+ * @rsc: reporting source code to handle
+ */
+void crw_unregister_handler(int rsc)
+{
+ if ((rsc < 0) || (rsc >= NR_RSCS))
+ return;
+ mutex_lock(&crw_handler_mutex);
+ crw_handlers[rsc] = NULL;
+ mutex_unlock(&crw_handler_mutex);
+}
+
+/*
+ * Retrieve CRWs and call function to handle event.
+ */
+static int crw_collect_info(void *unused)
+{
+ struct crw crw[2];
+ int ccode;
+ unsigned int chain;
+ int ignore;
+
+repeat:
+ ignore = down_interruptible(&crw_semaphore);
+ chain = 0;
+ while (1) {
+ crw_handler_t handler;
+
+ if (unlikely(chain > 1)) {
+ struct crw tmp_crw;
+
+ printk(KERN_WARNING"%s: Code does not support more "
+ "than two chained crws; please report to "
+ "linux390@de.ibm.com!\n", __func__);
+ ccode = stcrw(&tmp_crw);
+ printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ __func__, tmp_crw.slct, tmp_crw.oflw,
+ tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
+ tmp_crw.erc, tmp_crw.rsid);
+ printk(KERN_WARNING"%s: This was crw number %x in the "
+ "chain\n", __func__, chain);
+ if (ccode != 0)
+ break;
+ chain = tmp_crw.chn ? chain + 1 : 0;
+ continue;
+ }
+ ccode = stcrw(&crw[chain]);
+ if (ccode != 0)
+ break;
+ printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+ crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+ crw[chain].rsid);
+ /* Check for overflows. */
+ if (crw[chain].oflw) {
+ int i;
+
+ pr_debug("%s: crw overflow detected!\n", __func__);
+ mutex_lock(&crw_handler_mutex);
+ for (i = 0; i < NR_RSCS; i++) {
+ if (crw_handlers[i])
+ crw_handlers[i](NULL, NULL, 1);
+ }
+ mutex_unlock(&crw_handler_mutex);
+ chain = 0;
+ continue;
+ }
+ if (crw[0].chn && !chain) {
+ chain++;
+ continue;
+ }
+ mutex_lock(&crw_handler_mutex);
+ handler = crw_handlers[crw[chain].rsc];
+ if (handler)
+ handler(&crw[0], chain ? &crw[1] : NULL, 0);
+ mutex_unlock(&crw_handler_mutex);
+ /* chain is always 0 or 1 here. */
+ chain = crw[chain].chn ? chain + 1 : 0;
+ }
+ goto repeat;
+ return 0;
+}
+
+void crw_handle_channel_report(void)
+{
+ up(&crw_semaphore);
+}
+
+/*
+ * Separate initcall needed for semaphore initialization since
+ * crw_handle_channel_report might be called before crw_machine_check_init.
+ */
+static int __init crw_init_semaphore(void)
+{
+ init_MUTEX_LOCKED(&crw_semaphore);
+ return 0;
+}
+pure_initcall(crw_init_semaphore);
+
+/*
+ * Machine checks for the channel subsystem must be enabled
+ * after the channel subsystem is initialized
+ */
+static int __init crw_machine_check_init(void)
+{
+ struct task_struct *task;
+
+ task = kthread_run(crw_collect_info, NULL, "kmcheck");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ ctl_set_bit(14, 28); /* enable channel report MCH */
+ return 0;
+}
+device_initcall(crw_machine_check_init);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 8019288bc6de..0085d8901792 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,8 +18,8 @@
#include <linux/list.h>
#include <linux/reboot.h>
#include <asm/isc.h>
+#include <asm/crw.h>
-#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
@@ -83,6 +83,25 @@ static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
return rc;
}
+static int call_fn_all_sch(struct subchannel_id schid, void *data)
+{
+ struct cb_data *cb = data;
+ struct subchannel *sch;
+ int rc = 0;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ if (cb->fn_known_sch)
+ rc = cb->fn_known_sch(sch, cb->data);
+ put_device(&sch->dev);
+ } else {
+ if (cb->fn_unknown_sch)
+ rc = cb->fn_unknown_sch(schid, cb->data);
+ }
+
+ return rc;
+}
+
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data)
@@ -90,13 +109,17 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
struct cb_data cb;
int rc;
- cb.set = idset_sch_new();
- if (!cb.set)
- return -ENOMEM;
- idset_fill(cb.set);
cb.data = data;
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
+
+ cb.set = idset_sch_new();
+ if (!cb.set)
+ /* fall back to brute force scanning in case of oom */
+ return for_each_subchannel(call_fn_all_sch, &cb);
+
+ idset_fill(cb.set);
+
/* Process registered subchannels. */
rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
if (rc)
@@ -272,7 +295,7 @@ static int css_register_subchannel(struct subchannel *sch)
* the subchannel driver can decide itself when it wants to inform
* userspace of its existence.
*/
- sch->dev.uevent_suppress = 1;
+ dev_set_uevent_suppress(&sch->dev, 1);
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
@@ -287,7 +310,7 @@ static int css_register_subchannel(struct subchannel *sch)
* a fitting driver module may be loaded based on the
* modalias.
*/
- sch->dev.uevent_suppress = 0;
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
return ret;
@@ -510,6 +533,17 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
return ret;
}
+static void reprobe_after_idle(struct work_struct *unused)
+{
+ /* Make sure initial subchannel scan is done. */
+ wait_event(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (need_reprobe)
+ css_schedule_reprobe();
+}
+
+static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
+
/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
@@ -517,10 +551,12 @@ static void reprobe_all(struct work_struct *unused)
CIO_MSG_EVENT(4, "reprobe start\n");
- need_reprobe = 0;
/* Make sure initial subchannel scan is done. */
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
+ if (atomic_read(&ccw_device_init_count) != 0) {
+ queue_work(ccw_device_work, &reprobe_idle_work);
+ return;
+ }
+ need_reprobe = 0;
ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
@@ -619,7 +655,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
} else {
#ifdef CONFIG_SMP
- css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
+ css->global_pgid.pgid_high.cpu_addr = stap();
#else
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
@@ -765,7 +801,7 @@ init_channel_subsystem (void)
if (ret)
goto out;
- ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
+ ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
if (ret)
goto out;
@@ -845,7 +881,7 @@ out_unregister:
out_bus:
bus_unregister(&css_bus_type);
out:
- s390_unregister_crw_handler(CRW_RSC_CSS);
+ crw_unregister_handler(CRW_RSC_CSS);
chsc_free_sei_area();
kfree(slow_subchannel_set);
pr_alert("The CSS device driver initialization failed with "
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 23d5752349b5..35441fa16be1 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -310,8 +310,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work)
put_device(&cdev->dev);
}
-static void ccw_device_call_sch_unregister(struct work_struct *work);
-
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
@@ -335,11 +333,10 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
spin_unlock_irqrestore(cdev->ccwlock, flags);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_remove_orphan_cb);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
} else
/* Deregister subchannel, which will kill the ccw device. */
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(slow_path_wq, &cdev->private->kick_work);
+ ccw_device_schedule_sch_unregister(cdev);
}
/**
@@ -457,12 +454,13 @@ int ccw_device_set_online(struct ccw_device *cdev)
return (ret == 0) ? -ENODEV : ret;
}
-static void online_store_handle_offline(struct ccw_device *cdev)
+static int online_store_handle_offline(struct ccw_device *cdev)
{
if (cdev->private->state == DEV_STATE_DISCONNECTED)
ccw_device_remove_disconnected(cdev);
- else if (cdev->drv && cdev->drv->set_offline)
- ccw_device_set_offline(cdev);
+ else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ return ccw_device_set_offline(cdev);
+ return 0;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
@@ -470,7 +468,7 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
int ret;
/* Do device recognition, if needed. */
- if (cdev->id.cu_type == 0) {
+ if (cdev->private->state == DEV_STATE_BOXED) {
ret = ccw_device_recognition(cdev);
if (ret) {
CIO_MSG_EVENT(0, "Couldn't start recognition "
@@ -481,17 +479,21 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
}
wait_event(cdev->private->wait_q,
cdev->private->flags.recog_done);
+ if (cdev->private->state != DEV_STATE_OFFLINE)
+ /* recognition failed */
+ return -EAGAIN;
}
if (cdev->drv && cdev->drv->set_online)
ccw_device_set_online(cdev);
return 0;
}
+
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
int ret;
ret = online_store_recog_and_online(cdev);
- if (ret)
+ if (ret && !force)
return ret;
if (force && cdev->private->state == DEV_STATE_BOXED) {
ret = ccw_device_stlck(cdev);
@@ -499,7 +501,9 @@ static int online_store_handle_online(struct ccw_device *cdev, int force)
return ret;
if (cdev->id.cu_type == 0)
cdev->private->state = DEV_STATE_NOT_OPER;
- online_store_recog_and_online(cdev);
+ ret = online_store_recog_and_online(cdev);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -511,7 +515,11 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
int force, ret;
unsigned long i;
- if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ if ((cdev->private->state != DEV_STATE_OFFLINE &&
+ cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_BOXED &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) ||
+ atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -530,13 +538,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
goto out;
switch (i) {
case 0:
- online_store_handle_offline(cdev);
- ret = count;
+ ret = online_store_handle_offline(cdev);
break;
case 1:
ret = online_store_handle_online(cdev, force);
- if (!ret)
- ret = count;
break;
default:
ret = -EINVAL;
@@ -545,7 +550,7 @@ out:
if (cdev->drv)
module_put(cdev->drv->owner);
atomic_set(&cdev->private->onoff, 0);
- return ret;
+ return (ret < 0) ? ret : count;
}
static ssize_t
@@ -681,35 +686,22 @@ get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
return dev ? to_ccwdev(dev) : NULL;
}
-static void
-ccw_device_add_changed(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- if (device_add(&cdev->dev)) {
- put_device(&cdev->dev);
- return;
- }
- set_bit(1, &cdev->private->registered);
-}
-
-void ccw_device_do_unreg_rereg(struct work_struct *work)
+void ccw_device_do_unbind_bind(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
+ int ret;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
- ccw_device_unregister(cdev);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_add_changed);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ if (test_bit(1, &cdev->private->registered)) {
+ device_release_driver(&cdev->dev);
+ ret = device_attach(&cdev->dev);
+ WARN_ON(ret == -ENODEV);
+ }
}
static void
@@ -799,7 +791,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
return;
other_sch = to_subchannel(cdev->dev.parent);
/* Note: device_move() changes cdev->dev.parent */
- ret = device_move(&cdev->dev, &sch->dev);
+ ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
if (ret) {
CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
@@ -830,7 +822,7 @@ static void sch_attach_orphaned_device(struct subchannel *sch,
* Try to move the ccw device to its new subchannel.
* Note: device_move() changes cdev->dev.parent
*/
- ret = device_move(&cdev->dev, &sch->dev);
+ ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
if (ret) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
"failed (ret=%d)!\n",
@@ -897,7 +889,8 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
* ccw device can take its place on the subchannel.
* Note: device_move() changes cdev->dev.parent
*/
- ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
+ ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
+ DPM_ORDER_NONE);
if (ret) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
@@ -981,7 +974,7 @@ io_subchannel_register(struct work_struct *work)
* Now we know this subchannel will stay, we can throw
* our delayed uevent.
*/
- sch->dev.uevent_suppress = 0;
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
/* make it known to the system */
ret = ccw_device_register(cdev);
@@ -1028,33 +1021,35 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
put_device(&sch->dev);
}
+void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
+{
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_call_sch_unregister);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
+}
+
/*
* subchannel recognition done. Called from the state machine.
*/
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
- struct subchannel *sch;
-
if (css_init_done == 0) {
cdev->private->flags.recog_done = 1;
return;
}
switch (cdev->private->state) {
+ case DEV_STATE_BOXED:
+ /* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
/* Remove device found not operational. */
if (!get_device(&cdev->dev))
break;
- sch = to_subchannel(cdev->dev.parent);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(slow_path_wq, &cdev->private->kick_work);
+ ccw_device_schedule_sch_unregister(cdev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
- case DEV_STATE_BOXED:
- /* Device did not respond in time. */
case DEV_STATE_OFFLINE:
/*
* We can't register the device in interrupt context so
@@ -1129,7 +1124,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
* Try to move the ccw device to its new subchannel.
* Note: device_move() changes cdev->dev.parent
*/
- rc = device_move(&cdev->dev, &sch->dev);
+ rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
mutex_unlock(&sch->reg_mutex);
if (rc) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
@@ -1243,7 +1238,7 @@ static int io_subchannel_probe(struct subchannel *sch)
* the ccw_device and exit. This happens for all early
* devices, e.g. the console.
*/
- sch->dev.uevent_suppress = 0;
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
@@ -1568,8 +1563,7 @@ static int purge_fn(struct device *dev, void *data)
goto out;
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
priv->dev_id.devno);
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_call_sch_unregister);
- queue_work(slow_path_wq, &cdev->private->kick_work);
+ ccw_device_schedule_sch_unregister(cdev);
out:
/* Abort loop in case of pending signal. */
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0f2e63ea48de..f1cbbd94ad4e 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -80,13 +80,14 @@ void io_subchannel_init_config(struct subchannel *sch);
int ccw_device_cancel_halt_clear(struct ccw_device *);
-void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_do_unbind_bind(struct work_struct *);
void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
+void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
/* Function prototypes for device status and basic sense stuff. */
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8df5eaafc5ab..e46049261561 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
cdev->id.dev_type != cdev->private->senseid.dev_type ||
cdev->id.dev_model != cdev->private->senseid.dev_model) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unreg_rereg);
+ ccw_device_do_unbind_bind);
queue_work(ccw_device_work, &cdev->private->kick_work);
return 0;
}
@@ -256,13 +256,12 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
old_lpm = 0;
if (sch->lpm != old_lpm)
__recover_lost_chpids(sch, old_lpm);
- if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
- if (state == DEV_STATE_NOT_OPER) {
- cdev->private->flags.recog_done = 1;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- return;
- }
- /* Boxed devices don't need extra treatment. */
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
+ (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ wake_up(&cdev->private->wait_q);
+ return;
}
notify = 0;
same_dev = 0; /* Keep the compiler quiet... */
@@ -274,7 +273,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
- if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
+ if (cdev->online) {
same_dev = ccw_device_handle_oper(cdev);
notify = 1;
}
@@ -307,12 +306,17 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
" subchannel 0.%x.%04x\n",
cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
+ if (cdev->id.cu_type != 0) { /* device was recognized before */
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_BOXED;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
break;
}
cdev->private->state = state;
io_subchannel_recog_done(cdev);
- if (state != DEV_STATE_NOT_OPER)
- wake_up(&cdev->private->wait_q);
+ wake_up(&cdev->private->wait_q);
}
/*
@@ -366,7 +370,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev)
}
/* Driver doesn't want device back. */
ccw_device_set_notoper(cdev);
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg);
+ PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
@@ -390,10 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state)
cdev->private->state = state;
-
- if (state == DEV_STATE_BOXED)
+ if (state == DEV_STATE_BOXED) {
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
+ ccw_device_schedule_sch_unregister(cdev);
+ cdev->private->flags.donotify = 0;
+ }
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@@ -728,7 +735,7 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
{
struct subchannel *sch;
- cdev->private->state = DEV_STATE_NOT_OPER;
+ ccw_device_set_notoper(cdev);
sch = to_subchannel(cdev->dev.parent);
css_schedule_eval(sch->schid);
}
@@ -1052,7 +1059,7 @@ ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
/*
* An interrupt in state offline means a previous disable was not
- * successful. Try again.
+ * successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index eabcc42d63df..151754d54745 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -680,7 +680,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
if (!scsw_is_tm(&sch->schib.scsw) ||
- !(scsw_actl(&sch->schib.scsw) | SCSW_ACTL_START_PEND))
+ !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
return -EINVAL;
return cio_tm_intrg(sch);
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 42f2b09631b6..13bcb8114388 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -186,6 +186,9 @@ struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
+ /* first ACK'ed buffer */
+ int ack_start;
+
/* how much sbals are acknowledged with qebsm */
int ack_count;
@@ -234,7 +237,7 @@ struct qdio_q {
int first_to_check;
/* first_to_check of the last time */
- int last_move_ftc;
+ int last_move;
/* beginning position for calling the program */
int first_to_kick;
@@ -244,7 +247,6 @@ struct qdio_q {
struct qdio_irq *irq_ptr;
struct tasklet_struct tasklet;
- spinlock_t lock;
/* error condition during a data transfer */
unsigned int qdio_error;
@@ -354,7 +356,7 @@ int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
int auto_ack);
void qdio_check_outbound_after_thinint(struct qdio_q *q);
int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_inbound_handler(struct qdio_q *q);
+void qdio_kick_handler(struct qdio_q *q);
void qdio_stop_polling(struct qdio_q *q);
int qdio_siga_sync_q(struct qdio_q *q);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index da7afb04e71f..e3434b34f86c 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -63,8 +63,9 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
seq_printf(m, "ftc: %d\n", q->first_to_check);
- seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
+ seq_printf(m, "last_move: %d\n", q->last_move);
seq_printf(m, "polling: %d\n", q->u.in.polling);
+ seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 10cb0f8726e5..9e8a2914259b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -380,11 +380,11 @@ inline void qdio_stop_polling(struct qdio_q *q)
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
- set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = 0;
} else
- set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
+ set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
static void announce_buffer_error(struct qdio_q *q, int count)
@@ -419,15 +419,15 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!q->u.in.polling) {
q->u.in.polling = 1;
q->u.in.ack_count = count;
- q->last_move_ftc = q->first_to_check;
+ q->u.in.ack_start = q->first_to_check;
return;
}
/* delete the previous ACK's */
- set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = count;
- q->last_move_ftc = q->first_to_check;
+ q->u.in.ack_start = q->first_to_check;
return;
}
@@ -439,14 +439,13 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (q->u.in.polling) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
- set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
- }
- else {
+ set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+ } else {
q->u.in.polling = 1;
- set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK);
+ set_buf_state(q, new, SLSB_P_INPUT_ACK);
}
- q->last_move_ftc = new;
+ q->u.in.ack_start = new;
count--;
if (!count)
return;
@@ -455,7 +454,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
* Need to change all PRIMED buffers to NOT_INIT, otherwise
 * we're losing initiative in the thinint code.
*/
- set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT,
+ set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
count);
}
@@ -523,7 +522,8 @@ int qdio_inbound_q_moved(struct qdio_q *q)
bufnr = get_inbound_buffer_frontier(q);
- if ((bufnr != q->last_move_ftc) || q->qdio_error) {
+ if ((bufnr != q->last_move) || q->qdio_error) {
+ q->last_move = bufnr;
if (!need_siga_sync(q) && !pci_out_supported(q))
q->u.in.timestamp = get_usecs();
@@ -570,29 +570,30 @@ static int qdio_inbound_q_done(struct qdio_q *q)
}
}
-void qdio_kick_inbound_handler(struct qdio_q *q)
+void qdio_kick_handler(struct qdio_q *q)
{
- int count, start, end;
-
- qdio_perf_stat_inc(&perf_stats.inbound_handler);
-
- start = q->first_to_kick;
- end = q->first_to_check;
- if (end >= start)
- count = end - start;
- else
- count = end + QDIO_MAX_BUFFERS_PER_Q - start;
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
+ int start = q->first_to_kick;
+ int end = q->first_to_check;
+ int count;
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
- start, count, q->irq_ptr->int_parm);
+ count = sub_buf(end, start);
+
+ if (q->is_input_q) {
+ qdio_perf_stat_inc(&perf_stats.inbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
+ } else {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
+ }
+
+ q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+ q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = q->first_to_check;
+ q->first_to_kick = end;
q->qdio_error = 0;
}
@@ -603,7 +604,7 @@ again:
if (!qdio_inbound_q_moved(q))
return;
- qdio_kick_inbound_handler(q);
+ qdio_kick_handler(q);
if (!qdio_inbound_q_done(q))
/* means poll time is not yet over */
@@ -698,21 +699,21 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
bufnr = get_outbound_buffer_frontier(q);
- if ((bufnr != q->last_move_ftc) || q->qdio_error) {
- q->last_move_ftc = bufnr;
+ if ((bufnr != q->last_move) || q->qdio_error) {
+ q->last_move = bufnr;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
return 1;
} else
return 0;
}
-static void qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q)
{
unsigned int busy_bit;
int cc;
if (!need_siga_out(q))
- return;
+ return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
qdio_perf_stat_inc(&perf_stats.siga_out);
@@ -724,75 +725,37 @@ static void qdio_kick_outbound_q(struct qdio_q *q)
case 2:
if (busy_bit) {
DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
- q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY;
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d",
- q->nr);
- q->qdio_error = cc;
- }
+ cc |= QDIO_ERROR_SIGA_BUSY;
+ } else
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
break;
case 1:
case 3:
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
- q->qdio_error = cc;
break;
}
-}
-
-static void qdio_kick_outbound_handler(struct qdio_q *q)
-{
- int start, end, count;
-
- start = q->first_to_kick;
- end = q->last_move_ftc;
- if (end >= start)
- count = end - start;
- else
- count = end + QDIO_MAX_BUFFERS_PER_Q - start;
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
-
- if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
- return;
-
- q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
- q->irq_ptr->int_parm);
-
- /* for the next time: */
- q->first_to_kick = q->last_move_ftc;
- q->qdio_error = 0;
+ return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
- unsigned long flags;
-
qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
- spin_lock_irqsave(&q->lock, flags);
-
BUG_ON(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
- qdio_kick_outbound_handler(q);
-
- spin_unlock_irqrestore(&q->lock, flags);
+ qdio_kick_handler(q);
- if (queue_type(q) == QDIO_ZFCP_QFMT) {
+ if (queue_type(q) == QDIO_ZFCP_QFMT)
if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
- tasklet_schedule(&q->tasklet);
- return;
- }
+ goto sched;
/* bail out for HiperSockets unicast queues */
if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
return;
if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
- (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
- tasklet_schedule(&q->tasklet);
- return;
- }
+ (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
+ goto sched;
if (q->u.out.pci_out_enabled)
return;
@@ -810,6 +773,12 @@ static void __qdio_outbound_processing(struct qdio_q *q)
qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
}
}
+ return;
+
+sched:
+ if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ return;
+ tasklet_schedule(&q->tasklet);
}
/* outbound tasklet */
@@ -822,6 +791,9 @@ void qdio_outbound_processing(unsigned long data)
void qdio_outbound_timer(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
+
+ if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ return;
tasklet_schedule(&q->tasklet);
}
@@ -863,6 +835,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
int i;
struct qdio_q *q;
+ if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ return;
+
qdio_perf_stat_inc(&perf_stats.pci_int);
for_each_input_queue(irq_ptr, q, i)
@@ -1065,8 +1040,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
* @cdev: associated ccw device
* @how: use halt or clear to shutdown
*
- * This function calls qdio_shutdown() for @cdev with method @how
- * and on success qdio_free() for @cdev.
+ * This function calls qdio_shutdown() for @cdev with method @how
+ * and qdio_free(). The qdio_free() return value is ignored since
+ * !irq_ptr is already checked.
*/
int qdio_cleanup(struct ccw_device *cdev, int how)
{
@@ -1077,8 +1053,8 @@ int qdio_cleanup(struct ccw_device *cdev, int how)
return -ENODEV;
rc = qdio_shutdown(cdev, how);
- if (rc == 0)
- rc = qdio_free(cdev);
+
+ qdio_free(cdev);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
@@ -1090,11 +1066,11 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
int i;
for_each_input_queue(irq_ptr, q, i)
- tasklet_disable(&q->tasklet);
+ tasklet_kill(&q->tasklet);
for_each_output_queue(irq_ptr, q, i) {
- tasklet_disable(&q->tasklet);
del_timer(&q->u.out.timer);
+ tasklet_kill(&q->tasklet);
}
}
@@ -1112,6 +1088,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
+ BUG_ON(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@@ -1124,6 +1101,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
return 0;
}
+ /*
+ * Indicate that the device is going down. Scheduling the queue
+ * tasklets is forbidden from here on.
+ */
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+
tiqdio_remove_input_queues(irq_ptr);
qdio_shutdown_queues(cdev);
qdio_shutdown_debug_entries(irq_ptr, cdev);
@@ -1403,9 +1386,8 @@ int qdio_activate(struct ccw_device *cdev)
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_STOPPED:
case QDIO_IRQ_STATE_ERR:
- mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- return -EIO;
+ rc = -EIO;
+ break;
default:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
rc = 0;
@@ -1442,10 +1424,10 @@ static inline int buf_in_between(int bufnr, int start, int count)
* @bufnr: first buffer to process
* @count: how many buffers are emptied
*/
-static void handle_inbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
{
- int used, cc, diff;
+ int used, diff;
if (!q->u.in.polling)
goto set;
@@ -1456,19 +1438,18 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags,
q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
- } else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
+ } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
if (is_qebsm(q)) {
- /* partial overwrite, just update last_move_ftc */
+ /* partial overwrite, just update ack_start */
diff = add_buf(bufnr, count);
- diff = sub_buf(diff, q->last_move_ftc);
+ diff = sub_buf(diff, q->u.in.ack_start);
q->u.in.ack_count -= diff;
if (q->u.in.ack_count <= 0) {
q->u.in.polling = 0;
q->u.in.ack_count = 0;
- /* TODO: must we set last_move_ftc to something meaningful? */
goto set;
}
- q->last_move_ftc = add_buf(q->last_move_ftc, diff);
+ q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
}
else
/* the only ACK will be deleted, so stop polling */
@@ -1483,13 +1464,11 @@ set:
/* no need to signal as long as the adapter had free buffers */
if (used)
- return;
+ return 0;
- if (need_siga_in(q)) {
- cc = qdio_siga_input(q);
- if (cc)
- q->qdio_error = cc;
- }
+ if (need_siga_in(q))
+ return qdio_siga_input(q);
+ return 0;
}
/**
@@ -1499,11 +1478,11 @@ set:
* @bufnr: first buffer to process
* @count: how many buffers are filled
*/
-static void handle_outbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
{
unsigned char state;
- int used;
+ int used, rc = 0;
qdio_perf_stat_inc(&perf_stats.outbound_handler);
@@ -1518,27 +1497,26 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
if (queue_type(q) == QDIO_IQDIO_QFMT) {
if (multicast_outbound(q))
- qdio_kick_outbound_q(q);
+ rc = qdio_kick_outbound_q(q);
else
if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
(count > 1) &&
(count <= q->irq_ptr->ssqd_desc.mmwc)) {
/* exploit enhanced SIGA */
q->u.out.use_enh_siga = 1;
- qdio_kick_outbound_q(q);
+ rc = qdio_kick_outbound_q(q);
} else {
/*
* One siga-w per buffer required for unicast
* HiperSockets.
*/
q->u.out.use_enh_siga = 0;
- while (count--)
- qdio_kick_outbound_q(q);
+ while (count--) {
+ rc = qdio_kick_outbound_q(q);
+ if (rc)
+ goto out;
+ }
}
-
- /* report CC=2 conditions synchronously */
- if (q->qdio_error)
- __qdio_outbound_processing(q);
goto out;
}
@@ -1550,14 +1528,14 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
/* try to fast requeue buffers */
get_buf_state(q, prev_buf(bufnr), &state, 0);
if (state != SLSB_CU_OUTPUT_PRIMED)
- qdio_kick_outbound_q(q);
+ rc = qdio_kick_outbound_q(q);
else {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
qdio_perf_stat_inc(&perf_stats.fast_requeue);
}
out:
- /* Fixme: could wait forever if called from process context */
tasklet_schedule(&q->tasklet);
+ return rc;
}
/**
@@ -1596,14 +1574,12 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
return -EBUSY;
if (callflags & QDIO_FLAG_SYNC_INPUT)
- handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
- count);
+ return handle_inbound(irq_ptr->input_qs[q_nr],
+ callflags, bufnr, count);
else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
- handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
- count);
- else
- return -EINVAL;
- return 0;
+ return handle_outbound(irq_ptr->output_qs[q_nr],
+ callflags, bufnr, count);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index c08356b95bf5..18d54fc21ce9 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -117,7 +117,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
- spin_lock_init(&q->lock);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8e90e147b746..c655d011a78d 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -31,6 +31,7 @@
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
+DEFINE_MUTEX(tiq_list_lock);
/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;
@@ -95,12 +96,11 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
css_qdio_omit_svs = 1;
- for_each_input_queue(irq_ptr, q, i) {
+ mutex_lock(&tiq_list_lock);
+ for_each_input_queue(irq_ptr, q, i)
list_add_rcu(&q->entry, &tiq_list);
- synchronize_rcu();
- }
+ mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1);
- tasklet_schedule(&tiqdio_tasklet);
}
/*
@@ -118,7 +118,10 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
/* if establish triggered an error */
if (!q || !q->entry.prev || !q->entry.next)
continue;
+
+ mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
synchronize_rcu();
}
}
@@ -155,15 +158,15 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
*/
qdio_check_outbound_after_thinint(q);
-again:
if (!qdio_inbound_q_moved(q))
return;
- qdio_kick_inbound_handler(q);
+ qdio_kick_handler(q);
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- goto again;
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
}
qdio_stop_polling(q);
@@ -173,7 +176,8 @@ again:
*/
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
- goto again;
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
}
}
@@ -366,10 +370,11 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
void __exit tiqdio_unregister_thinints(void)
{
- tasklet_disable(&tiqdio_tasklet);
+ WARN_ON(!list_empty(&tiq_list));
if (tiqdio_alsi) {
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
+ tasklet_kill(&tiqdio_tasklet);
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index cb22b97944b8..65b6a96afe6b 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -128,8 +128,7 @@ static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
if (l == zdev->list.prev)
return;
/* Move zdev behind l */
- list_del(&zdev->list);
- list_add(&zdev->list, l);
+ list_move(&zdev->list, l);
}
/**
@@ -157,8 +156,7 @@ static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
if (l == zdev->list.next)
return;
/* Move zdev before l */
- list_del(&zdev->list);
- list_add_tail(&zdev->list, l);
+ list_move_tail(&zdev->list, l);
}
static void zcrypt_device_release(struct kref *kref)
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index e7a1e22e77ac..c20d4790258e 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -781,8 +781,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
- memset(ap_msg.message, 0x0, ap_msg.length);
- kfree(ap_msg.message);
+ kzfree(ap_msg.message);
return rc;
}
diff --git a/drivers/s390/ebcdic.c b/drivers/s390/ebcdic.c
deleted file mode 100644
index 99c98da15473..000000000000
--- a/drivers/s390/ebcdic.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * arch/s390/kernel/ebcdic.c
- * ECBDIC -> ASCII, ASCII -> ECBDIC conversion tables.
- *
- * S390 version
- * Copyright (C) 1998 IBM Corporation
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <asm/types.h>
-
-/*
- * ASCII -> EBCDIC
- */
-__u8 _ascebc[256] =
-{
- /*00 NL SH SX EX ET NQ AK BL */
- 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
- /*08 BS HT LF VT FF CR SO SI */
- 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- /*10 DL D1 D2 D3 D4 NK SN EB */
- 0x10, 0x11, 0x12, 0x13, 0x3C, 0x15, 0x32, 0x26,
- /*18 CN EM SB EC FS GS RS US */
- 0x18, 0x19, 0x3F, 0x27, 0x1C, 0x1D, 0x1E, 0x1F,
- /*20 SP ! " # $ % & ' */
- 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
- /*28 ( ) * + , - . / */
- 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
- /*30 0 1 2 3 4 5 6 7 */
- 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
- /*38 8 9 : ; < = > ? */
- 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
- /*40 @ A B C D E F G */
- 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
- /*48 H I J K L M N O */
- 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
- /*50 P Q R S T U V W */
- 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
- /*58 X Y Z [ \ ] ^ _ */
- 0xE7, 0xE8, 0xE9, 0xAD, 0xE0, 0xBD, 0x5F, 0x6D,
- /*60 ` a b c d e f g */
- 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
- /*68 h i j k l m n o */
- 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
- /*70 p q r s t u v w */
- 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
- /*78 x y z { | } ~ DL */
- 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
- 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0xFF
-};
-
-/*
- * EBCDIC -> ASCII
- */
-__u8 _ebcasc[256] =
-{
- /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
- 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
- /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
- 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
- -ENP ->LF */
- 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
- /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
- -IUS */
- 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
- -INP */
- 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
- /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
- -SW */
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
- /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
- 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
- /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
- 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
- /* 0x40 SP RSP ä ---- */
- 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
- /* 0x48 . < ( + | */
- 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
- /* 0x50 & ---- */
- 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
- /* 0x58 ß ! $ * ) ; */
- 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
- /* 0x60 - / ---- Ä ---- ---- ---- */
- 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
- /* 0x68 ---- , % _ > ? */
- 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
- /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
- 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- /* 0x78 * ` : # @ ' = " */
- 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
- /* 0x80 * a b c d e f g */
- 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- /* 0x88 h i ---- ---- ---- */
- 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
- /* 0x90 ° j k l m n o p */
- 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
- /* 0x98 q r ---- ---- */
- 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
- /* 0xA0 ~ s t u v w x */
- 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
- /* 0xA8 y z ---- ---- ---- ---- */
- 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
- /* 0xB0 ^ ---- § ---- */
- 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
- /* 0xB8 ---- [ ] ---- ---- ---- ---- */
- 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
- /* 0xC0 { A B C D E F G */
- 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
- /* 0xC8 H I ---- ö ---- */
- 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
- /* 0xD0 } J K L M N O P */
- 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
- /* 0xD8 Q R ---- ü */
- 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
- /* 0xE0 \ S T U V W X */
- 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
- /* 0xE8 Y Z ---- Ö ---- ---- ---- */
- 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
- /* 0xF0 0 1 2 3 4 5 6 7 */
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
- /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
- 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
-};
-
-/*
- * EBCDIC (capitals) -> ASCII (small case)
- */
-__u8 _ebcasc_reduce_case[256] =
-{
- /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
- 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
-
- /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
- 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-
- /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
- -ENP ->LF */
- 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
-
- /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
- -IUS */
- 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
-
- /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
- -INP */
- 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
-
- /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
- -SW */
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
-
- /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
- 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
-
- /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
- 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
-
- /* 0x40 SP RSP ä ---- */
- 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
-
- /* 0x48 . < ( + | */
- 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
-
- /* 0x50 & ---- */
- 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
-
- /* 0x58 ß ! $ * ) ; */
- 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
-
- /* 0x60 - / ---- Ä ---- ---- ---- */
- 0x2D, 0x2F, 0x07, 0x84, 0x07, 0x07, 0x07, 0x8F,
-
- /* 0x68 ---- , % _ > ? */
- 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
-
- /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
- 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
-
- /* 0x78 * ` : # @ ' = " */
- 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
-
- /* 0x80 * a b c d e f g */
- 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
-
- /* 0x88 h i ---- ---- ---- */
- 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
-
- /* 0x90 ° j k l m n o p */
- 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
-
- /* 0x98 q r ---- ---- */
- 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
-
- /* 0xA0 ~ s t u v w x */
- 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
-
- /* 0xA8 y z ---- ---- ---- ---- */
- 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
-
- /* 0xB0 ^ ---- § ---- */
- 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
-
- /* 0xB8 ---- [ ] ---- ---- ---- ---- */
- 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
-
- /* 0xC0 { A B C D E F G */
- 0x7B, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
-
- /* 0xC8 H I ---- ö ---- */
- 0x68, 0x69, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
-
- /* 0xD0 } J K L M N O P */
- 0x7D, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
-
- /* 0xD8 Q R ---- ü */
- 0x71, 0x72, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
-
- /* 0xE0 \ S T U V W X */
- 0x5C, 0xF6, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
-
- /* 0xE8 Y Z ---- Ö ---- ---- ---- */
- 0x79, 0x7A, 0xFD, 0x07, 0x94, 0x07, 0x07, 0x07,
-
- /* 0xF0 0 1 2 3 4 5 6 7 */
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
-
- /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
- 0x38, 0x39, 0x07, 0x07, 0x81, 0x07, 0x07, 0x07
-};
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 6382c04d2bdf..96eddb3b1d08 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_LCS) += lcs.o cu3088.o
obj-$(CONFIG_CLAW) += claw.o cu3088.o
-qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
obj-$(CONFIG_QETH) += qeth.o
qeth_l2-y += qeth_l2_main.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index f5e618562c5f..30a43cc79e76 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -60,6 +60,9 @@
* 1.25 Added Packing support
* 1.5
*/
+
+#define KMSG_COMPONENT "claw"
+
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/debug.h>
@@ -94,7 +97,7 @@
CLAW uses the s390dbf file system see claw_trace and claw_setup
*/
-
+static char version[] __initdata = "CLAW driver";
static char debug_buffer[255];
/**
* Debug Facility Stuff
@@ -206,20 +209,30 @@ static struct net_device_stats *claw_stats(struct net_device *dev);
static int pages_to_order_of_mag(int num_of_pages);
static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
/* sysfs Functions */
-static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf);
-static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr,
+static ssize_t claw_hname_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_hname_write(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count);
-static ssize_t claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf);
-static ssize_t claw_adname_write(struct device *dev, struct device_attribute *attr,
+static ssize_t claw_adname_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_adname_write(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count);
-static ssize_t claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf);
-static ssize_t claw_apname_write(struct device *dev, struct device_attribute *attr,
+static ssize_t claw_apname_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_apname_write(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count);
-static ssize_t claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf);
-static ssize_t claw_wbuff_write(struct device *dev, struct device_attribute *attr,
+static ssize_t claw_wbuff_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_wbuff_write(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count);
-static ssize_t claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf);
-static ssize_t claw_rbuff_write(struct device *dev, struct device_attribute *attr,
+static ssize_t claw_rbuff_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_rbuff_write(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count);
static int claw_add_files(struct device *dev);
static void claw_remove_files(struct device *dev);
@@ -298,8 +311,8 @@ claw_probe(struct ccwgroup_device *cgdev)
if (rc) {
probe_error(cgdev);
put_device(&cgdev->dev);
- printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
- dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__);
+ dev_err(&cgdev->dev, "Creating the /proc files for a new"
+ " CLAW device failed\n");
CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
return rc;
}
@@ -335,6 +348,8 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
rc=claw_hw_tx( skb, dev, 1 );
spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
+ if (rc)
+ rc = NETDEV_TX_BUSY;
return rc;
} /* end of claw_tx */
@@ -496,7 +511,8 @@ claw_open(struct net_device *dev)
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
(((privptr->channel[READ].flag |
privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
- printk(KERN_INFO "%s: remote side is not ready\n", dev->name);
+ dev_info(&privptr->channel[READ].cdev->dev,
+ "%s: remote side is not ready\n", dev->name);
CLAW_DBF_TEXT(2, trace, "notrdy");
for ( i = 0; i < 2; i++) {
@@ -582,10 +598,9 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4, trace, "clawirq");
/* Bypass all 'unsolicited interrupts' */
if (!cdev->dev.driver_data) {
- printk(KERN_WARNING "claw: unsolicited interrupt for device:"
- "%s received c-%02x d-%02x\n",
- dev_name(&cdev->dev), irb->scsw.cmd.cstat,
- irb->scsw.cmd.dstat);
+ dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
+ " IRQ, c-%02x d-%02x\n",
+ irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
CLAW_DBF_TEXT(2, trace, "badirq");
return;
}
@@ -597,8 +612,7 @@ claw_irq_handler(struct ccw_device *cdev,
else if (privptr->channel[WRITE].cdev == cdev)
p_ch = &privptr->channel[WRITE];
else {
- printk(KERN_WARNING "claw: Can't determine channel for "
- "interrupt, device %s\n", dev_name(&cdev->dev));
+ dev_warn(&cdev->dev, "The device is not a CLAW device\n");
CLAW_DBF_TEXT(2, trace, "badchan");
return;
}
@@ -612,7 +626,8 @@ claw_irq_handler(struct ccw_device *cdev,
/* Check for good subchannel return code, otherwise info message */
if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
- printk(KERN_INFO "%s: subchannel check for device: %04x -"
+ dev_info(&cdev->dev,
+ "%s: subchannel check for device: %04x -"
" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
dev->name, p_ch->devno,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
@@ -651,7 +666,7 @@ claw_irq_handler(struct ccw_device *cdev,
wake_up(&p_ch->wait); /* wake claw_open (READ)*/
} else if (p_ch->flag == CLAW_WRITE) {
p_ch->claw_state = CLAW_START_WRITE;
- /* send SYSTEM_VALIDATE */
+ /* send SYSTEM_VALIDATE */
claw_strt_read(dev, LOCK_NO);
claw_send_control(dev,
SYSTEM_VALIDATE_REQUEST,
@@ -659,10 +674,9 @@ claw_irq_handler(struct ccw_device *cdev,
p_env->host_name,
p_env->adapter_name);
} else {
- printk(KERN_WARNING "claw: unsolicited "
- "interrupt for device:"
- "%s received c-%02x d-%02x\n",
- dev_name(&cdev->dev),
+ dev_warn(&cdev->dev, "The CLAW device received"
+ " an unexpected IRQ, "
+ "c-%02x d-%02x\n",
irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
return;
@@ -677,8 +691,8 @@ claw_irq_handler(struct ccw_device *cdev,
(p_ch->irb->ecw[0] & 0x40) == 0x40 ||
(p_ch->irb->ecw[0]) == 0) {
privptr->stats.rx_errors++;
- printk(KERN_INFO "%s: Restart is "
- "required after remote "
+ dev_info(&cdev->dev,
+ "%s: Restart is required after remote "
"side recovers \n",
dev->name);
}
@@ -713,11 +727,13 @@ claw_irq_handler(struct ccw_device *cdev,
return;
case CLAW_START_WRITE:
if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- printk(KERN_INFO "%s: Unit Check Occured in "
+ dev_info(&cdev->dev,
+				"%s: Unit Check occurred in "
"write channel\n", dev->name);
clear_bit(0, (void *)&p_ch->IO_active);
if (p_ch->irb->ecw[0] & 0x80) {
- printk(KERN_INFO "%s: Resetting Event "
+ dev_info(&cdev->dev,
+ "%s: Resetting Event "
"occurred:\n", dev->name);
init_timer(&p_ch->timer);
p_ch->timer.function =
@@ -725,7 +741,8 @@ claw_irq_handler(struct ccw_device *cdev,
p_ch->timer.data = (unsigned long)p_ch;
p_ch->timer.expires = jiffies + 10*HZ;
add_timer(&p_ch->timer);
- printk(KERN_INFO "%s: write connection "
+ dev_info(&cdev->dev,
+ "%s: write connection "
"restarting\n", dev->name);
}
CLAW_DBF_TEXT(4, trace, "rstrtwrt");
@@ -733,9 +750,10 @@ claw_irq_handler(struct ccw_device *cdev,
}
if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
clear_bit(0, (void *)&p_ch->IO_active);
- printk(KERN_INFO "%s: Unit Exception "
- "Occured in write channel\n",
- dev->name);
+ dev_info(&cdev->dev,
+ "%s: Unit Exception "
+ "occurred in write channel\n",
+ dev->name);
}
if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
@@ -757,8 +775,9 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4, trace, "StWtExit");
return;
default:
- printk(KERN_WARNING "%s: wrong selection code - irq "
- "state=%d\n", dev->name, p_ch->claw_state);
+ dev_warn(&cdev->dev,
+ "The CLAW device for %s received an unexpected IRQ\n",
+ dev->name);
CLAW_DBF_TEXT(2, trace, "badIRQ");
return;
}
@@ -910,8 +929,10 @@ claw_release(struct net_device *dev)
if (((privptr->channel[READ].last_dstat |
privptr->channel[WRITE].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
- printk(KERN_WARNING "%s: channel problems during close - "
- "read: %02x - write: %02x\n",
+ dev_warn(&privptr->channel[READ].cdev->dev,
+ "Deactivating %s completed with incorrect"
+ " subchannel status "
+ "(read %02x, write %02x)\n",
dev->name,
privptr->channel[READ].last_dstat,
privptr->channel[WRITE].last_dstat);
@@ -1012,7 +1033,7 @@ static int
pages_to_order_of_mag(int num_of_pages)
{
int order_of_mag=1; /* assume 2 pages */
- int nump=2;
+ int nump;
CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
@@ -1076,8 +1097,8 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
}
if ( privptr-> p_read_active_first ==NULL ) {
- privptr-> p_read_active_first= p_first; /* set new first */
- privptr-> p_read_active_last = p_last; /* set new last */
+ privptr->p_read_active_first = p_first; /* set new first */
+ privptr->p_read_active_last = p_last; /* set new last */
}
else {
@@ -1113,7 +1134,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
privptr->p_read_active_last->r_TIC_2.cda=
(__u32)__pa(&p_first->read);
}
- /* chain in new set of blocks */
+ /* chain in new set of blocks */
privptr->p_read_active_last->next = p_first;
privptr->p_read_active_last=p_last;
} /* end of if ( privptr-> p_read_active_first ==NULL) */
@@ -1135,21 +1156,18 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
case -EBUSY: /* BUSY is a transient state no action needed */
break;
case -ENODEV:
- printk(KERN_EMERG "%s: Missing device called "
- "for IO ENODEV\n", dev_name(&cdev->dev));
- break;
- case -EIO:
- printk(KERN_EMERG "%s: Status pending... EIO \n",
- dev_name(&cdev->dev));
+ dev_err(&cdev->dev, "The remote channel adapter is not"
+ " available\n");
break;
case -EINVAL:
- printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
- dev_name(&cdev->dev));
+ dev_err(&cdev->dev,
+ "The status of the remote channel adapter"
+ " is not valid\n");
break;
default:
- printk(KERN_EMERG "%s: Unknown error in "
- "Do_IO %d\n", dev_name(&cdev->dev),
- return_code);
+ dev_err(&cdev->dev, "The common device layer"
+ " returned error code %d\n",
+ return_code);
}
}
CLAW_DBF_TEXT(4, trace, "ccwret");
@@ -1163,42 +1181,37 @@ static void
ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
{
struct net_device *ndev = p_ch->ndev;
+ struct device *dev = &p_ch->cdev->dev;
CLAW_DBF_TEXT(4, trace, "unitchek");
- printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n",
- ndev->name, sense);
-
- if (sense & 0x40) {
- if (sense & 0x01) {
- printk(KERN_WARNING "%s: Interface disconnect or "
- "Selective reset "
- "occurred (remote side)\n", ndev->name);
- }
- else {
- printk(KERN_WARNING "%s: System reset occured"
- " (remote side)\n", ndev->name);
- }
- }
- else if (sense & 0x20) {
- if (sense & 0x04) {
- printk(KERN_WARNING "%s: Data-streaming "
- "timeout)\n", ndev->name);
- }
- else {
- printk(KERN_WARNING "%s: Data-transfer parity"
- " error\n", ndev->name);
- }
- }
- else if (sense & 0x10) {
- if (sense & 0x20) {
- printk(KERN_WARNING "%s: Hardware malfunction "
- "(remote side)\n", ndev->name);
- }
- else {
- printk(KERN_WARNING "%s: read-data parity error "
- "(remote side)\n", ndev->name);
- }
- }
+ dev_warn(dev, "The communication peer of %s disconnected\n",
+ ndev->name);
+
+ if (sense & 0x40) {
+ if (sense & 0x01) {
+ dev_warn(dev, "The remote channel adapter for"
+ " %s has been reset\n",
+ ndev->name);
+ }
+ } else if (sense & 0x20) {
+ if (sense & 0x04) {
+ dev_warn(dev, "A data streaming timeout occurred"
+ " for %s\n",
+ ndev->name);
+ } else if (sense & 0x10) {
+ dev_warn(dev, "The remote channel adapter for %s"
+ " is faulty\n",
+ ndev->name);
+ } else {
+ dev_warn(dev, "A data transfer parity error occurred"
+ " for %s\n",
+ ndev->name);
+ }
+ } else if (sense & 0x10) {
+ dev_warn(dev, "A read data parity error occurred"
+ " for %s\n",
+ ndev->name);
+ }
} /* end of ccw_check_unit_check */
@@ -1235,7 +1248,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
break;
}
- return 0;
+ return rc;
} /* end of find_link */
/*-------------------------------------------------------------------*
@@ -1347,7 +1360,10 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
privptr->p_write_free_chain=p_this_ccw->next;
p_this_ccw->next=NULL;
--privptr->write_free_count; /* -1 */
- bytesInThisBuffer=len_of_data;
+ if (len_of_data >= privptr->p_env->write_size)
+ bytesInThisBuffer = privptr->p_env->write_size;
+ else
+ bytesInThisBuffer = len_of_data;
memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
len_of_data-=bytesInThisBuffer;
pDataAddress+=(unsigned long)bytesInThisBuffer;
@@ -1375,7 +1391,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
*/
if (p_first_ccw!=NULL) {
- /* setup ending ccw sequence for this segment */
+ /* setup ending ccw sequence for this segment */
pEnd=privptr->p_end_ccw;
if (pEnd->write1) {
pEnd->write1=0x00; /* second end ccw is now active */
@@ -1697,10 +1713,11 @@ init_ccw_bk(struct net_device *dev)
p_buf-> w_TIC_1.flags = 0;
p_buf-> w_TIC_1.count = 0;
- if (((unsigned long)p_buff+privptr->p_env->write_size) >=
+ if (((unsigned long)p_buff +
+ privptr->p_env->write_size) >=
((unsigned long)(p_buff+2*
- (privptr->p_env->write_size) -1) & PAGE_MASK)) {
- p_buff= p_buff+privptr->p_env->write_size;
+ (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
+ p_buff = p_buff+privptr->p_env->write_size;
}
}
}
@@ -1840,15 +1857,16 @@ init_ccw_bk(struct net_device *dev)
p_buf->header.opcode=0xff;
p_buf->header.flag=CLAW_PENDING;
- if (((unsigned long)p_buff+privptr->p_env->read_size) >=
- ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1)
- & PAGE_MASK) ) {
+ if (((unsigned long)p_buff+privptr->p_env->read_size) >=
+ ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
+ -1)
+ & PAGE_MASK)) {
p_buff= p_buff+privptr->p_env->read_size;
}
else {
p_buff=
(void *)((unsigned long)
- (p_buff+2*(privptr->p_env->read_size) -1)
+ (p_buff+2*(privptr->p_env->read_size)-1)
& PAGE_MASK) ;
}
} /* for read_buffers */
@@ -1856,24 +1874,28 @@ init_ccw_bk(struct net_device *dev)
else { /* read Size >= PAGE_SIZE */
for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
p_buff = (void *)__get_free_pages(__GFP_DMA,
- (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) );
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread));
if (p_buff==NULL) {
free_pages((unsigned long)privptr->p_buff_ccw,
- (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+ (int)pages_to_order_of_mag(privptr->
+ p_buff_ccw_num));
/* free the write pages */
p_buf=privptr->p_buff_write;
while (p_buf!=NULL) {
- free_pages((unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perwrite ));
+ free_pages(
+ (unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite));
p_buf=p_buf->next;
}
/* free any read pages already alloc */
p_buf=privptr->p_buff_read;
while (p_buf!=NULL) {
- free_pages((unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perread ));
+ free_pages(
+ (unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread));
p_buf=p_buf->next;
}
privptr->p_buff_ccw=NULL;
@@ -2003,7 +2025,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
tdev = &privptr->channel[READ].cdev->dev;
memcpy( &temp_host_name, p_env->host_name, 8);
memcpy( &temp_ws_name, p_env->adapter_name , 8);
- printk(KERN_INFO "%s: CLAW device %.8s: "
+ dev_info(tdev, "%s: CLAW device %.8s: "
"Received Control Packet\n",
dev->name, temp_ws_name);
if (privptr->release_pend==1) {
@@ -2022,32 +2044,30 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
if (p_ctlbk->version != CLAW_VERSION_ID) {
claw_snd_sys_validate_rsp(dev, p_ctlbk,
CLAW_RC_WRONG_VERSION);
- printk("%s: %d is wrong version id. "
- "Expected %d\n",
- dev->name, p_ctlbk->version,
- CLAW_VERSION_ID);
+ dev_warn(tdev, "The communication peer of %s"
+ " uses an incorrect API version %d\n",
+ dev->name, p_ctlbk->version);
}
p_sysval = (struct sysval *)&(p_ctlbk->data);
- printk("%s: Recv Sys Validate Request: "
- "Vers=%d,link_id=%d,Corr=%d,WS name=%."
- "8s,Host name=%.8s\n",
- dev->name, p_ctlbk->version,
- p_ctlbk->linkid,
- p_ctlbk->correlator,
- p_sysval->WS_name,
- p_sysval->host_name);
+ dev_info(tdev, "%s: Recv Sys Validate Request: "
+ "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
+ "Host name=%.8s\n",
+ dev->name, p_ctlbk->version,
+ p_ctlbk->linkid,
+ p_ctlbk->correlator,
+ p_sysval->WS_name,
+ p_sysval->host_name);
if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
claw_snd_sys_validate_rsp(dev, p_ctlbk,
CLAW_RC_NAME_MISMATCH);
CLAW_DBF_TEXT(2, setup, "HSTBAD");
CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
- printk(KERN_INFO "%s: Host name mismatch\n",
- dev->name);
- printk(KERN_INFO "%s: Received :%s: "
- "expected :%s: \n",
- dev->name,
+ dev_warn(tdev,
+ "Host name %s for %s does not match the"
+ " remote adapter name %s\n",
p_sysval->host_name,
+ dev->name,
temp_host_name);
}
if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
@@ -2056,35 +2076,38 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
CLAW_DBF_TEXT(2, setup, "WSNBAD");
CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
- printk(KERN_INFO "%s: WS name mismatch\n",
- dev->name);
- printk(KERN_INFO "%s: Received :%s: "
- "expected :%s: \n",
- dev->name,
- p_sysval->WS_name,
- temp_ws_name);
+ dev_warn(tdev, "Adapter name %s for %s does not match"
+ " the remote host name %s\n",
+ p_sysval->WS_name,
+ dev->name,
+ temp_ws_name);
}
if ((p_sysval->write_frame_size < p_env->write_size) &&
(p_env->packing == 0)) {
claw_snd_sys_validate_rsp(dev, p_ctlbk,
CLAW_RC_HOST_RCV_TOO_SMALL);
- printk(KERN_INFO "%s: host write size is too "
- "small\n", dev->name);
+ dev_warn(tdev,
+ "The local write buffer is smaller than the"
+ " remote read buffer\n");
CLAW_DBF_TEXT(2, setup, "wrtszbad");
}
if ((p_sysval->read_frame_size < p_env->read_size) &&
(p_env->packing == 0)) {
claw_snd_sys_validate_rsp(dev, p_ctlbk,
CLAW_RC_HOST_RCV_TOO_SMALL);
- printk(KERN_INFO "%s: host read size is too "
- "small\n", dev->name);
+ dev_warn(tdev,
+ "The local read buffer is smaller than the"
+ " remote write buffer\n");
CLAW_DBF_TEXT(2, setup, "rdsizbad");
}
claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
- printk(KERN_INFO "%s: CLAW device %.8s: System validate "
- "completed.\n", dev->name, temp_ws_name);
- printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name,
- p_sysval->read_frame_size, p_sysval->write_frame_size);
+ dev_info(tdev,
+ "CLAW device %.8s: System validate"
+ " completed.\n", temp_ws_name);
+ dev_info(tdev,
+ "%s: sys Validate Rsize:%d Wsize:%d\n",
+ dev->name, p_sysval->read_frame_size,
+ p_sysval->write_frame_size);
privptr->system_validate_comp = 1;
if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
p_env->packing = PACKING_ASK;
@@ -2092,8 +2115,10 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
break;
case SYSTEM_VALIDATE_RESPONSE:
p_sysval = (struct sysval *)&(p_ctlbk->data);
- printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d,"
- "WS name=%.8s,Host name=%.8s\n",
+ dev_info(tdev,
+ "Settings for %s validated (version=%d, "
+ "remote device=%d, rc=%d, adapter name=%.8s, "
+ "host name=%.8s)\n",
dev->name,
p_ctlbk->version,
p_ctlbk->correlator,
@@ -2102,41 +2127,39 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
p_sysval->host_name);
switch (p_ctlbk->rc) {
case 0:
- printk(KERN_INFO "%s: CLAW device "
- "%.8s: System validate "
- "completed.\n",
- dev->name, temp_ws_name);
+ dev_info(tdev, "%s: CLAW device "
+ "%.8s: System validate completed.\n",
+ dev->name, temp_ws_name);
if (privptr->system_validate_comp == 0)
claw_strt_conn_req(dev);
privptr->system_validate_comp = 1;
break;
case CLAW_RC_NAME_MISMATCH:
- printk(KERN_INFO "%s: Sys Validate "
- "Resp : Host, WS name is "
- "mismatch\n",
- dev->name);
+ dev_warn(tdev, "Validating %s failed because of"
+ " a host or adapter name mismatch\n",
+ dev->name);
break;
case CLAW_RC_WRONG_VERSION:
- printk(KERN_INFO "%s: Sys Validate "
- "Resp : Wrong version\n",
+ dev_warn(tdev, "Validating %s failed because of a"
+ " version conflict\n",
dev->name);
break;
case CLAW_RC_HOST_RCV_TOO_SMALL:
- printk(KERN_INFO "%s: Sys Validate "
- "Resp : bad frame size\n",
+ dev_warn(tdev, "Validating %s failed because of a"
+ " frame size conflict\n",
dev->name);
break;
default:
- printk(KERN_INFO "%s: Sys Validate "
- "error code=%d \n",
- dev->name, p_ctlbk->rc);
+ dev_warn(tdev, "The communication peer of %s rejected"
+ " the connection\n",
+ dev->name);
break;
}
break;
case CONNECTION_REQUEST:
p_connect = (struct conncmd *)&(p_ctlbk->data);
- printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d,"
+ dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
"Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
dev->name,
p_ctlbk->version,
@@ -2146,21 +2169,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
p_connect->WS_name);
if (privptr->active_link_ID != 0) {
claw_snd_disc(dev, p_ctlbk);
- printk(KERN_INFO "%s: Conn Req error : "
- "already logical link is active \n",
+ dev_info(tdev, "%s rejected a connection request"
+ " because it is already active\n",
dev->name);
}
if (p_ctlbk->linkid != 1) {
claw_snd_disc(dev, p_ctlbk);
- printk(KERN_INFO "%s: Conn Req error : "
- "req logical link id is not 1\n",
+ dev_info(tdev, "%s rejected a request to open multiple"
+ " connections\n",
dev->name);
}
rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
if (rc != 0) {
claw_snd_disc(dev, p_ctlbk);
- printk(KERN_INFO "%s: Conn Resp error: "
- "req appl name does not match\n",
+ dev_info(tdev, "%s rejected a connection request"
+ " because of a type mismatch\n",
dev->name);
}
claw_send_control(dev,
@@ -2172,7 +2195,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
p_env->packing = PACK_SEND;
claw_snd_conn_req(dev, 0);
}
- printk(KERN_INFO "%s: CLAW device %.8s: Connection "
+ dev_info(tdev, "%s: CLAW device %.8s: Connection "
"completed link_id=%d.\n",
dev->name, temp_ws_name,
p_ctlbk->linkid);
@@ -2182,7 +2205,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
break;
case CONNECTION_RESPONSE:
p_connect = (struct conncmd *)&(p_ctlbk->data);
- printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d,"
+ dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
"Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
dev->name,
p_ctlbk->version,
@@ -2193,16 +2216,18 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
p_connect->WS_name);
if (p_ctlbk->rc != 0) {
- printk(KERN_INFO "%s: Conn Resp error: rc=%d \n",
- dev->name, p_ctlbk->rc);
+ dev_warn(tdev, "The communication peer of %s rejected"
+ " a connection request\n",
+ dev->name);
return 1;
}
rc = find_link(dev,
p_connect->host_name, p_connect->WS_name);
if (rc != 0) {
claw_snd_disc(dev, p_ctlbk);
- printk(KERN_INFO "%s: Conn Resp error: "
- "req appl name does not match\n",
+ dev_warn(tdev, "The communication peer of %s"
+ " rejected a connection "
+ "request because of a type mismatch\n",
dev->name);
}
/* should be until CONNECTION_CONFIRM */
@@ -2210,7 +2235,8 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
break;
case CONNECTION_CONFIRM:
p_connect = (struct conncmd *)&(p_ctlbk->data);
- printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
+ dev_info(tdev,
+ "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
"Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
dev->name,
p_ctlbk->version,
@@ -2221,21 +2247,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
privptr->active_link_ID = p_ctlbk->linkid;
if (p_env->packing > PACKING_ASK) {
- printk(KERN_INFO "%s: Confirmed Now packing\n",
- dev->name);
+ dev_info(tdev,
+ "%s: Confirmed Now packing\n", dev->name);
p_env->packing = DO_PACKED;
}
p_ch = &privptr->channel[WRITE];
wake_up(&p_ch->wait);
} else {
- printk(KERN_INFO "%s: Conn confirm: "
- "unexpected linkid=%d \n",
+ dev_warn(tdev, "Activating %s failed because of"
+ " an incorrect link ID=%d\n",
dev->name, p_ctlbk->linkid);
claw_snd_disc(dev, p_ctlbk);
}
break;
case DISCONNECT:
- printk(KERN_INFO "%s: Disconnect: "
+ dev_info(tdev, "%s: Disconnect: "
"Vers=%d,link_id=%d,Corr=%d\n",
dev->name, p_ctlbk->version,
p_ctlbk->linkid, p_ctlbk->correlator);
@@ -2247,12 +2273,13 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
privptr->active_link_ID = 0;
break;
case CLAW_ERROR:
- printk(KERN_INFO "%s: CLAW ERROR detected\n",
+ dev_warn(tdev, "The communication peer of %s failed\n",
dev->name);
break;
default:
- printk(KERN_INFO "%s: Unexpected command code=%d \n",
- dev->name, p_ctlbk->command);
+ dev_warn(tdev, "The communication peer of %s sent"
+ " an unknown command code\n",
+ dev->name);
break;
}
@@ -2294,12 +2321,14 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
memcpy(&p_sysval->host_name, local_name, 8);
memcpy(&p_sysval->WS_name, remote_name, 8);
if (privptr->p_env->packing > 0) {
- p_sysval->read_frame_size=DEF_PACK_BUFSIZE;
- p_sysval->write_frame_size=DEF_PACK_BUFSIZE;
+ p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
+ p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
} else {
/* how big is the biggest group of packets */
- p_sysval->read_frame_size=privptr->p_env->read_size;
- p_sysval->write_frame_size=privptr->p_env->write_size;
+ p_sysval->read_frame_size =
+ privptr->p_env->read_size;
+ p_sysval->write_frame_size =
+ privptr->p_env->write_size;
}
memset(&p_sysval->reserved, 0x00, 4);
break;
@@ -2485,7 +2514,6 @@ unpack_read(struct net_device *dev )
p_dev = &privptr->channel[READ].cdev->dev;
p_env = privptr->p_env;
p_this_ccw=privptr->p_read_active_first;
- i=0;
while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
pack_off = 0;
p = 0;
@@ -2511,8 +2539,10 @@ unpack_read(struct net_device *dev )
mtc_this_frm=1;
if (p_this_ccw->header.length!=
privptr->p_env->read_size ) {
- printk(KERN_INFO " %s: Invalid frame detected "
- "length is %02x\n" ,
+ dev_warn(p_dev,
+ "The communication peer of %s"
+ " sent a faulty"
+ " frame of length %02x\n",
dev->name, p_this_ccw->header.length);
}
}
@@ -2544,7 +2574,7 @@ unpack_next:
goto NextFrame;
p_packd = p_this_ccw->p_buffer+pack_off;
p_packh = (struct clawph *) p_packd;
- if ((p_packh->len == 0) || /* all done with this frame? */
+ if ((p_packh->len == 0) || /* done with this frame? */
(p_packh->flag != 0))
goto NextFrame;
bytes_to_mov = p_packh->len;
@@ -2594,9 +2624,9 @@ unpack_next:
netif_rx(skb);
}
else {
+ dev_info(p_dev, "Allocating a buffer for"
+ " incoming data failed\n");
privptr->stats.rx_dropped++;
- printk(KERN_WARNING "%s: %s() low on memory\n",
- dev->name,__func__);
}
privptr->mtc_offset=0;
privptr->mtc_logical_link=-1;
@@ -2720,8 +2750,8 @@ claw_strt_out_IO( struct net_device *dev )
if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
parm = (unsigned long) p_ch;
CLAW_DBF_TEXT(2, trace, "StWrtIO");
- rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm,
- 0xff, 0);
+ rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
+ 0xff, 0);
if (rc != 0) {
ccw_check_return_code(p_ch->cdev, rc);
}
@@ -2816,22 +2846,26 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
* Initialize everything of the net device except the name and the
* channel structs.
*/
+static const struct net_device_ops claw_netdev_ops = {
+ .ndo_open = claw_open,
+ .ndo_stop = claw_release,
+ .ndo_get_stats = claw_stats,
+ .ndo_start_xmit = claw_tx,
+ .ndo_change_mtu = claw_change_mtu,
+};
+
static void
claw_init_netdevice(struct net_device * dev)
{
CLAW_DBF_TEXT(2, setup, "init_dev");
CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
dev->mtu = CLAW_DEFAULT_MTU_SIZE;
- dev->hard_start_xmit = claw_tx;
- dev->open = claw_open;
- dev->stop = claw_release;
- dev->get_stats = claw_stats;
- dev->change_mtu = claw_change_mtu;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 1300;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->netdev_ops = &claw_netdev_ops;
CLAW_DBF_TEXT(2, setup, "initok");
return;
}
@@ -2880,8 +2914,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
int ret;
struct ccw_dev_id dev_id;
- printk(KERN_INFO "claw: add for %s\n",
- dev_name(&cgdev->cdev[READ]->dev));
+ dev_info(&cgdev->dev, "add for %s\n",
+ dev_name(&cgdev->cdev[READ]->dev));
CLAW_DBF_TEXT(2, setup, "new_dev");
privptr = cgdev->dev.driver_data;
cgdev->cdev[READ]->dev.driver_data = privptr;
@@ -2897,29 +2931,28 @@ claw_new_device(struct ccwgroup_device *cgdev)
if (ret == 0)
ret = add_channel(cgdev->cdev[1],1,privptr);
if (ret != 0) {
- printk(KERN_WARNING
- "add channel failed with ret = %d\n", ret);
+ dev_warn(&cgdev->dev, "Creating a CLAW group device"
+ " failed with error code %d\n", ret);
goto out;
}
ret = ccw_device_set_online(cgdev->cdev[READ]);
if (ret != 0) {
- printk(KERN_WARNING
- "claw: ccw_device_set_online %s READ failed "
- "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev),
- ret);
+ dev_warn(&cgdev->dev,
+ "Setting the read subchannel online"
+ " failed with error code %d\n", ret);
goto out;
}
ret = ccw_device_set_online(cgdev->cdev[WRITE]);
if (ret != 0) {
- printk(KERN_WARNING
- "claw: ccw_device_set_online %s WRITE failed "
- "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev),
- ret);
+ dev_warn(&cgdev->dev,
+ "Setting the write subchannel online "
+ "failed with error code %d\n", ret);
goto out;
}
dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
if (!dev) {
- printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__);
+ dev_warn(&cgdev->dev,
+ "Activating the CLAW device failed\n");
goto out;
}
dev->ml_priv = privptr;
@@ -2947,13 +2980,13 @@ claw_new_device(struct ccwgroup_device *cgdev)
privptr->channel[WRITE].ndev = dev;
privptr->p_env->ndev = dev;
- printk(KERN_INFO "%s:readsize=%d writesize=%d "
+ dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
dev->name, p_env->read_size,
p_env->write_size, p_env->read_buffers,
p_env->write_buffers, p_env->devno[READ],
p_env->devno[WRITE]);
- printk(KERN_INFO "%s:host_name:%.8s, adapter_name "
+ dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
":%.8s api_type: %.8s\n",
dev->name, p_env->host_name,
p_env->adapter_name , p_env->api_type);
@@ -2997,8 +3030,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
ndev = priv->channel[READ].ndev;
if (ndev) {
/* Close the device */
- printk(KERN_INFO
- "%s: shuting down \n",ndev->name);
+ dev_info(&cgdev->dev, "%s: shutting down \n",
+ ndev->name);
if (ndev->flags & IFF_RUNNING)
ret = claw_release(ndev);
ndev->flags &=~IFF_RUNNING;
@@ -3023,8 +3056,7 @@ claw_remove_device(struct ccwgroup_device *cgdev)
CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
priv = cgdev->dev.driver_data;
BUG_ON(!priv);
- printk(KERN_INFO "claw: %s() called %s will be removed.\n",
- __func__, dev_name(&cgdev->cdev[0]->dev));
+	dev_info(&cgdev->dev, "will be removed.\n");
if (cgdev->state == CCWGROUP_ONLINE)
claw_shutdown_device(cgdev);
claw_remove_files(&cgdev->dev);
@@ -3063,7 +3095,8 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-claw_hname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+claw_hname_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct claw_privbk *priv;
struct claw_env * p_env;
@@ -3100,7 +3133,8 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-claw_adname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+claw_adname_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct claw_privbk *priv;
struct claw_env * p_env;
@@ -3138,7 +3172,8 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-claw_apname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+claw_apname_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct claw_privbk *priv;
struct claw_env * p_env;
@@ -3185,7 +3220,8 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-claw_wbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+claw_wbuff_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct claw_privbk *priv;
struct claw_env * p_env;
@@ -3226,7 +3262,8 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+claw_rbuff_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct claw_privbk *priv;
struct claw_env *p_env;
@@ -3289,7 +3326,7 @@ claw_cleanup(void)
{
unregister_cu3088_discipline(&claw_group_driver);
claw_unregister_debug_facility();
- printk(KERN_INFO "claw: Driver unloaded\n");
+ pr_info("Driver unloaded\n");
}
@@ -3303,12 +3340,12 @@ static int __init
claw_init(void)
{
int ret = 0;
- printk(KERN_INFO "claw: starting driver\n");
+ pr_info("Loading %s\n", version);
ret = claw_register_debug_facility();
if (ret) {
- printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
- __func__,ret);
+ pr_err("Registering with the S/390 debug feature"
+ " failed with error code %d\n", ret);
return ret;
}
CLAW_DBF_TEXT(2, setup, "init_mod");
@@ -3316,8 +3353,8 @@ claw_init(void)
if (ret) {
CLAW_DBF_TEXT(2, setup, "init_bad");
claw_unregister_debug_facility();
- printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n",
- __func__,ret);
+ pr_err("Registering with the cu3088 device driver failed "
+ "with error code %d\n", ret);
}
return ret;
}
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index f29c7086fc19..4ded9ac2c5ef 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -410,9 +410,8 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
priv->stats.rx_length_errors++;
goto again;
}
- block_len -= 2;
- if (block_len > 0) {
- *((__u16 *)skb->data) = block_len;
+ if (block_len > 2) {
+ *((__u16 *)skb->data) = block_len - 2;
ctcm_unpack_skb(ch, skb);
}
again:
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2678573becec..77f4033a0f4f 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -105,7 +105,8 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
return;
}
pskb->protocol = ntohs(header->type);
- if (header->length <= LL_HEADER_LENGTH) {
+ if ((header->length <= LL_HEADER_LENGTH) ||
+ (len <= LL_HEADER_LENGTH)) {
if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Illegal packet size %d(%d,%d)"
@@ -167,11 +168,9 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
if (len > 0) {
skb_pull(pskb, header->length);
if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
- if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
- CTCM_DBF_DEV_NAME(TRACE, dev,
- "Overrun in ctcm_unpack_skb");
- ch->logflags |= LOG_FLAG_OVERRUN;
- }
+ CTCM_DBF_DEV_NAME(TRACE, dev,
+ "Overrun in ctcm_unpack_skb");
+ ch->logflags |= LOG_FLAG_OVERRUN;
return;
}
skb_put(pskb, LL_HEADER_LENGTH);
@@ -906,11 +905,11 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
}
if (ctcm_test_and_set_busy(dev))
- return -EBUSY;
+ return NETDEV_TX_BUSY;
dev->trans_start = jiffies;
if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
- return 1;
+ return NETDEV_TX_BUSY;
return 0;
}
@@ -1099,12 +1098,24 @@ static void ctcm_free_netdevice(struct net_device *dev)
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+static const struct net_device_ops ctcm_netdev_ops = {
+ .ndo_open = ctcm_open,
+ .ndo_stop = ctcm_close,
+ .ndo_get_stats = ctcm_stats,
+ .ndo_change_mtu = ctcm_change_mtu,
+ .ndo_start_xmit = ctcm_tx,
+};
+
+static const struct net_device_ops ctcm_mpc_netdev_ops = {
+ .ndo_open = ctcm_open,
+ .ndo_stop = ctcm_close,
+ .ndo_get_stats = ctcm_stats,
+ .ndo_change_mtu = ctcm_change_mtu,
+ .ndo_start_xmit = ctcmpc_tx,
+};
+
void static ctcm_dev_setup(struct net_device *dev)
{
- dev->open = ctcm_open;
- dev->stop = ctcm_close;
- dev->get_stats = ctcm_stats;
- dev->change_mtu = ctcm_change_mtu;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -1157,12 +1168,12 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
dev->mtu = MPC_BUFSIZE_DEFAULT -
TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
- dev->hard_start_xmit = ctcmpc_tx;
+ dev->netdev_ops = &ctcm_mpc_netdev_ops;
dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
priv->buffer_size = MPC_BUFSIZE_DEFAULT;
} else {
dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
- dev->hard_start_xmit = ctcm_tx;
+ dev->netdev_ops = &ctcm_netdev_ops;
dev->hard_header_len = LL_HEADER_LENGTH + 2;
}
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 3db5f846bbf6..781e18be7e8f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -393,7 +393,6 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
} else {
/* there are problems...bail out */
/* there may be a state mismatch so restart */
- grp->port_persist = 1;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->allocchan_callback_retries = 0;
}
@@ -699,11 +698,9 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
return;
done:
- if (rc != 0) {
- grp->in_sweep = 0;
- ctcm_clear_busy_do(dev);
- fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- }
+ grp->in_sweep = 0;
+ ctcm_clear_busy_do(dev);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
@@ -1118,7 +1115,6 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
goto done;
- pdu_last_seen = 0;
while ((pskb->len > 0) && !pdu_last_seen) {
curr_pdu = (struct pdu *)pskb->data;
@@ -1396,8 +1392,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, dev->name);
if ((grp->saved_state != MPCG_STATE_RESET) ||
/* dealloc_channel has been called */
- ((grp->saved_state == MPCG_STATE_RESET) &&
- (grp->port_persist == 0)))
+ (grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer);
wch = priv->channel[WRITE];
@@ -1917,10 +1912,8 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
if (priv)
grp = priv->mpcg;
- if (grp == NULL) {
- fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ if (grp == NULL)
return;
- }
for (direction = READ; direction <= WRITE; direction++) {
struct channel *ch = priv->channel[direction];
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 49c3bfa1afd7..a45bc24eb5f9 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -39,6 +39,7 @@
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/delay.h>
+#include <linux/kthread.h>
#include <net/arp.h>
#include <net/ip.h>
@@ -1259,7 +1260,6 @@ lcs_register_mc_addresses(void *data)
struct in_device *in4_dev;
card = (struct lcs_card *) data;
- daemonize("regipm");
if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
return 0;
@@ -1562,7 +1562,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
if (skb == NULL) {
card->stats.tx_dropped++;
card->stats.tx_errors++;
- return -EIO;
+ return 0;
}
if (card->state != DEV_STATE_UP) {
dev_kfree_skb(skb);
@@ -1587,7 +1587,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
card->tx_buffer = lcs_get_buffer(&card->write);
if (card->tx_buffer == NULL) {
card->stats.tx_dropped++;
- rc = -EBUSY;
+ rc = NETDEV_TX_BUSY;
goto out;
}
card->tx_buffer->callback = lcs_txbuffer_cb;
@@ -1753,11 +1753,10 @@ lcs_start_kernel_thread(struct work_struct *work)
struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
LCS_DBF_TEXT(5, trace, "krnthrd");
if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
- kernel_thread(lcs_recovery, (void *) card, SIGCHLD);
+ kthread_run(lcs_recovery, card, "lcs_recover");
#ifdef CONFIG_IP_MULTICAST
if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
- kernel_thread(lcs_register_mc_addresses,
- (void *) card, SIGCHLD);
+ kthread_run(lcs_register_mc_addresses, card, "regipm");
#endif
}
@@ -2101,6 +2100,20 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
/**
* lcs_new_device will be called by setting the group device online.
*/
+static const struct net_device_ops lcs_netdev_ops = {
+ .ndo_open = lcs_open_device,
+ .ndo_stop = lcs_stop_device,
+ .ndo_get_stats = lcs_getstats,
+ .ndo_start_xmit = lcs_start_xmit,
+};
+
+static const struct net_device_ops lcs_mc_netdev_ops = {
+ .ndo_open = lcs_open_device,
+ .ndo_stop = lcs_stop_device,
+ .ndo_get_stats = lcs_getstats,
+ .ndo_start_xmit = lcs_start_xmit,
+ .ndo_set_multicast_list = lcs_set_multicast_list,
+};
static int
lcs_new_device(struct ccwgroup_device *ccwgdev)
@@ -2168,14 +2181,11 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
goto out;
card->dev = dev;
card->dev->ml_priv = card;
- card->dev->open = lcs_open_device;
- card->dev->stop = lcs_stop_device;
- card->dev->hard_start_xmit = lcs_start_xmit;
- card->dev->get_stats = lcs_getstats;
+ card->dev->netdev_ops = &lcs_netdev_ops;
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
- card->dev->set_multicast_list = lcs_set_multicast_list;
+ card->dev->netdev_ops = &lcs_mc_netdev_ops;
#endif
netdev_out:
lcs_set_allowed_threads(card,0xffffffff);
@@ -2258,7 +2268,6 @@ lcs_recovery(void *ptr)
int rc;
card = (struct lcs_card *) ptr;
- daemonize("lcs_recover");
LCS_DBF_TEXT(4, trace, "recover1");
if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 930e2fc2a011..be716e45f7ac 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1312,7 +1312,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
if (netiucv_test_and_set_busy(dev)) {
IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
- return -EBUSY;
+ return NETDEV_TX_BUSY;
}
dev->trans_start = jiffies;
rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
@@ -1876,20 +1876,24 @@ static void netiucv_free_netdevice(struct net_device *dev)
/**
* Initialize a net device. (Called from kernel in alloc_netdev())
*/
+static const struct net_device_ops netiucv_netdev_ops = {
+ .ndo_open = netiucv_open,
+ .ndo_stop = netiucv_close,
+ .ndo_get_stats = netiucv_stats,
+ .ndo_start_xmit = netiucv_tx,
+ .ndo_change_mtu = netiucv_change_mtu,
+};
+
static void netiucv_setup_netdevice(struct net_device *dev)
{
dev->mtu = NETIUCV_MTU_DEFAULT;
- dev->hard_start_xmit = netiucv_tx;
- dev->open = netiucv_open;
- dev->stop = netiucv_close;
- dev->get_stats = netiucv_stats;
- dev->change_mtu = netiucv_change_mtu;
dev->destructor = netiucv_free_netdevice;
dev->hard_header_len = NETIUCV_HDRLEN;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->netdev_ops = &netiucv_netdev_ops;
}
/**
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e0c45574b0c8..447e1d19581a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -134,6 +134,7 @@ struct qeth_perf_stats {
unsigned int sg_skbs_rx;
unsigned int sg_frags_rx;
unsigned int sg_alloc_page_rx;
+ unsigned int tx_csum;
};
/* Routing stuff */
@@ -403,7 +404,6 @@ struct qeth_qdio_q {
/* possible types of qeth large_send support */
enum qeth_large_send_types {
QETH_LARGE_SEND_NO,
- QETH_LARGE_SEND_EDDP,
QETH_LARGE_SEND_TSO,
};
@@ -838,11 +838,9 @@ int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
- struct sk_buff *, struct qeth_hdr *, int,
- struct qeth_eddp_context *, int, int);
+ struct sk_buff *, struct qeth_hdr *, int, int, int);
int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
- struct sk_buff *, struct qeth_hdr *,
- int, struct qeth_eddp_context *);
+ struct sk_buff *, struct qeth_hdr *, int);
int qeth_core_get_stats_count(struct net_device *);
void qeth_core_get_ethtool_stats(struct net_device *,
struct ethtool_stats *, u64 *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d1b5bebea7fb..c827d69b5a91 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -17,7 +17,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ip.h>
-#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
@@ -26,7 +25,6 @@
#include <asm/io.h>
#include "qeth_core.h"
-#include "qeth_core_offl.h"
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
@@ -285,17 +283,6 @@ int qeth_set_large_send(struct qeth_card *card,
netif_tx_disable(card->dev);
card->options.large_send = type;
switch (card->options.large_send) {
- case QETH_LARGE_SEND_EDDP:
- if (card->info.type != QETH_CARD_TYPE_IQD) {
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM;
- } else {
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
- card->options.large_send = QETH_LARGE_SEND_NO;
- rc = -EOPNOTSUPP;
- }
- break;
case QETH_LARGE_SEND_TSO:
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
@@ -956,7 +943,6 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
}
- qeth_eddp_buf_release_contexts(buf);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
@@ -1690,7 +1676,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
int rc;
unsigned long flags;
struct qeth_reply *reply = NULL;
- unsigned long timeout;
+ unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd;
QETH_DBF_TEXT(TRACE, 2, "sendctl");
@@ -1715,9 +1701,10 @@ int qeth_send_control_data(struct qeth_card *card, int len,
qeth_prepare_control_data(card, len, iob);
if (IS_IPA(iob->data))
- timeout = jiffies + QETH_IPA_TIMEOUT;
+ event_timeout = QETH_IPA_TIMEOUT;
else
- timeout = jiffies + QETH_TIMEOUT;
+ event_timeout = QETH_TIMEOUT;
+ timeout = jiffies + event_timeout;
QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -1745,7 +1732,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
if ((cmd->hdr.command == IPA_CMD_SETIP) &&
(cmd->hdr.prot_version == QETH_PROT_IPV4)) {
if (!wait_event_timeout(reply->wait_q,
- atomic_read(&reply->received), timeout))
+ atomic_read(&reply->received), event_timeout))
goto time_err;
} else {
while (!atomic_read(&reply->received)) {
@@ -2693,40 +2680,21 @@ static int qeth_handle_send_error(struct qeth_card *card,
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
- int cc = qdio_err & 3;
QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
- switch (cc) {
- case 0:
- if (qdio_err) {
- QETH_DBF_TEXT(TRACE, 1, "lnkfail");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
- (u16)qdio_err, (u8)sbalf15);
- return QETH_SEND_ERROR_LINK_FAILURE;
- }
+
+ if (!qdio_err)
return QETH_SEND_ERROR_NONE;
- case 2:
- if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
- QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- return QETH_SEND_ERROR_KICK_IT;
- }
- if ((sbalf15 >= 15) && (sbalf15 <= 31))
- return QETH_SEND_ERROR_RETRY;
- return QETH_SEND_ERROR_LINK_FAILURE;
- /* look at qdio_error and sbalf 15 */
- case 1:
- QETH_DBF_TEXT(TRACE, 1, "SIGAcc1");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- return QETH_SEND_ERROR_LINK_FAILURE;
- case 3:
- default:
- QETH_DBF_TEXT(TRACE, 1, "SIGAcc3");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- return QETH_SEND_ERROR_KICK_IT;
- }
+
+ if ((sbalf15 >= 15) && (sbalf15 <= 31))
+ return QETH_SEND_ERROR_RETRY;
+
+ QETH_DBF_TEXT(TRACE, 1, "lnkfail");
+ QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
+ (u16)qdio_err, (u8)sbalf15);
+ return QETH_SEND_ERROR_LINK_FAILURE;
}
/*
@@ -2862,10 +2830,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
if (rc) {
+ queue->card->stats.tx_errors += count;
+ /* ignore temporary SIGA errors without busy condition */
+ if (rc == QDIO_ERROR_SIGA_TARGET)
+ return;
QETH_DBF_TEXT(TRACE, 2, "flushbuf");
QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
- queue->card->stats.tx_errors += count;
+
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
qeth_schedule_recovery(queue->card);
@@ -2940,13 +2912,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- /*we only handle the KICK_IT error by doing a recovery */
- if (qeth_handle_send_error(card, buffer, qdio_error)
- == QETH_SEND_ERROR_KICK_IT){
- netif_stop_queue(card->dev);
- qeth_schedule_recovery(card);
- return;
- }
+ qeth_handle_send_error(card, buffer, qdio_error);
qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
@@ -3187,11 +3153,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
int qeth_do_send_packet_fast(struct qeth_card *card,
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, int elements_needed,
- struct qeth_eddp_context *ctx, int offset, int hd_len)
+ int offset, int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
- int buffers_needed = 0;
- int flush_cnt = 0;
int index;
/* spin until we get the queue ... */
@@ -3206,27 +3170,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
goto out;
- if (ctx == NULL)
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- else {
- buffers_needed = qeth_eddp_check_buffers_for_context(queue,
- ctx);
- if (buffers_needed < 0)
- goto out;
- queue->next_buf_to_fill =
- (queue->next_buf_to_fill + buffers_needed) %
- QDIO_MAX_BUFFERS_PER_Q;
- }
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- if (ctx == NULL) {
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
- qeth_flush_buffers(queue, index, 1);
- } else {
- flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
- WARN_ON(buffers_needed != flush_cnt);
- qeth_flush_buffers(queue, index, flush_cnt);
- }
+ qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+ qeth_flush_buffers(queue, index, 1);
return 0;
out:
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3236,7 +3184,7 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
- int elements_needed, struct qeth_eddp_context *ctx)
+ int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
int start_index;
@@ -3262,53 +3210,32 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack) {
do_pack = 1;
- if (ctx == NULL) {
- /* does packet fit in current buffer? */
- if ((QETH_MAX_BUFFER_ELEMENTS(card) -
- buffer->next_element_to_fill) < elements_needed) {
- /* ... no -> set state PRIMED */
- atomic_set(&buffer->state,
- QETH_QDIO_BUF_PRIMED);
- flush_count++;
- queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
- buffer = &queue->bufs[queue->next_buf_to_fill];
- /* we did a step forward, so check buffer state
- * again */
- if (atomic_read(&buffer->state) !=
- QETH_QDIO_BUF_EMPTY){
- qeth_flush_buffers(queue, start_index,
+ /* does packet fit in current buffer? */
+ if ((QETH_MAX_BUFFER_ELEMENTS(card) -
+ buffer->next_element_to_fill) < elements_needed) {
+ /* ... no -> set state PRIMED */
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ flush_count++;
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /* we did a step forward, so check buffer state
+ * again */
+ if (atomic_read(&buffer->state) !=
+ QETH_QDIO_BUF_EMPTY) {
+ qeth_flush_buffers(queue, start_index,
flush_count);
- atomic_set(&queue->state,
+ atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
- }
- }
- } else {
- /* check if we have enough elements (including following
- * free buffers) to handle eddp context */
- if (qeth_eddp_check_buffers_for_context(queue, ctx)
- < 0) {
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
}
}
- if (ctx == NULL)
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
- else {
- tmp = qeth_eddp_fill_buffer(queue, ctx,
- queue->next_buf_to_fill);
- if (tmp < 0) {
- rc = -EBUSY;
- goto out;
- }
- }
+ tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
QDIO_MAX_BUFFERS_PER_Q;
flush_count += tmp;
-out:
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4327,6 +4254,7 @@ static struct {
/* 30 */{"tx count"},
{"tx do_QDIO time"},
{"tx do_QDIO count"},
+ {"tx csum"},
};
int qeth_core_get_stats_count(struct net_device *dev)
@@ -4378,6 +4306,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[30] = card->perf_stats.outbound_cnt;
data[31] = card->perf_stats.outbound_do_qdio_time;
data[32] = card->perf_stats.outbound_do_qdio_cnt;
+ data[33] = card->perf_stats.tx_csum;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
diff --git a/drivers/s390/net/qeth_core_offl.c b/drivers/s390/net/qeth_core_offl.c
deleted file mode 100644
index 4080126ca48c..000000000000
--- a/drivers/s390/net/qeth_core_offl.c
+++ /dev/null
@@ -1,699 +0,0 @@
-/*
- * drivers/s390/net/qeth_core_offl.c
- *
- * Copyright IBM Corp. 2007
- * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
- * Frank Blaschka <frank.blaschka@de.ibm.com>
- */
-
-#include <linux/errno.h>
-#include <linux/ip.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/kernel.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
-#include <linux/skbuff.h>
-
-#include <net/ip.h>
-#include <net/ip6_checksum.h>
-
-#include "qeth_core.h"
-#include "qeth_core_mpc.h"
-#include "qeth_core_offl.h"
-
-int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
- struct qeth_eddp_context *ctx)
-{
- int index = queue->next_buf_to_fill;
- int elements_needed = ctx->num_elements;
- int elements_in_buffer;
- int skbs_in_buffer;
- int buffers_needed = 0;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
- while (elements_needed > 0) {
- buffers_needed++;
- if (atomic_read(&queue->bufs[index].state) !=
- QETH_QDIO_BUF_EMPTY)
- return -EBUSY;
-
- elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
- queue->bufs[index].next_element_to_fill;
- skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
- elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
- index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
- }
- return buffers_needed;
-}
-
-static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
-{
- int i;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
- for (i = 0; i < ctx->num_pages; ++i)
- free_page((unsigned long)ctx->pages[i]);
- kfree(ctx->pages);
- kfree(ctx->elements);
- kfree(ctx);
-}
-
-
-static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
-{
- atomic_inc(&ctx->refcnt);
-}
-
-void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
-{
- if (atomic_dec_return(&ctx->refcnt) == 0)
- qeth_eddp_free_context(ctx);
-}
-EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
-
-void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
-{
- struct qeth_eddp_context_reference *ref;
-
- QETH_DBF_TEXT(TRACE, 6, "eddprctx");
- while (!list_empty(&buf->ctx_list)) {
- ref = list_entry(buf->ctx_list.next,
- struct qeth_eddp_context_reference, list);
- qeth_eddp_put_context(ref->ctx);
- list_del(&ref->list);
- kfree(ref);
- }
-}
-
-static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
- struct qeth_eddp_context *ctx)
-{
- struct qeth_eddp_context_reference *ref;
-
- QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
- ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
- if (ref == NULL)
- return -ENOMEM;
- qeth_eddp_get_context(ctx);
- ref->ctx = ctx;
- list_add_tail(&ref->list, &buf->ctx_list);
- return 0;
-}
-
-int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_eddp_context *ctx, int index)
-{
- struct qeth_qdio_out_buffer *buf = NULL;
- struct qdio_buffer *buffer;
- int elements = ctx->num_elements;
- int element = 0;
- int flush_cnt = 0;
- int must_refcnt = 1;
- int i;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
- while (elements > 0) {
- buf = &queue->bufs[index];
- if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
- /* normally this should not happen since we checked for
- * available elements in qeth_check_elements_for_context
- */
- if (element == 0)
- return -EBUSY;
- else {
- QETH_DBF_MESSAGE(2, "could only partially fill"
- "eddp buffer!\n");
- goto out;
- }
- }
- /* check if the whole next skb fits into current buffer */
- if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
- buf->next_element_to_fill)
- < ctx->elements_per_skb){
- /* no -> go to next buffer */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
- flush_cnt++;
- /* new buffer, so we have to add ctx to buffer'ctx_list
- * and increment ctx's refcnt */
- must_refcnt = 1;
- continue;
- }
- if (must_refcnt) {
- must_refcnt = 0;
- if (qeth_eddp_buf_ref_context(buf, ctx)) {
- goto out_check;
- }
- }
- buffer = buf->buffer;
- /* fill one skb into buffer */
- for (i = 0; i < ctx->elements_per_skb; ++i) {
- if (ctx->elements[element].length != 0) {
- buffer->element[buf->next_element_to_fill].
- addr = ctx->elements[element].addr;
- buffer->element[buf->next_element_to_fill].
- length = ctx->elements[element].length;
- buffer->element[buf->next_element_to_fill].
- flags = ctx->elements[element].flags;
- buf->next_element_to_fill++;
- }
- element++;
- elements--;
- }
- }
-out_check:
- if (!queue->do_pack) {
- QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
- /* set state to PRIMED -> will be flushed */
- if (buf->next_element_to_fill > 0) {
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt++;
- }
- } else {
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.skbs_sent_pack++;
- QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
- if (buf->next_element_to_fill >=
- QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
- /*
- * packed buffer if full -> set state PRIMED
- * -> will be flushed
- */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt++;
- }
- }
-out:
- return flush_cnt;
-}
-
-static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
- struct qeth_eddp_data *eddp, int data_len)
-{
- u8 *page;
- int page_remainder;
- int page_offset;
- int pkt_len;
- struct qeth_eddp_element *element;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
- page = ctx->pages[ctx->offset >> PAGE_SHIFT];
- page_offset = ctx->offset % PAGE_SIZE;
- element = &ctx->elements[ctx->num_elements];
- pkt_len = eddp->nhl + eddp->thl + data_len;
- /* FIXME: layer2 and VLAN !!! */
- if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
- pkt_len += ETH_HLEN;
- if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
- pkt_len += VLAN_HLEN;
- /* does complete packet fit in current page ? */
- page_remainder = PAGE_SIZE - page_offset;
- if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
- /* no -> go to start of next page */
- ctx->offset += page_remainder;
- page = ctx->pages[ctx->offset >> PAGE_SHIFT];
- page_offset = 0;
- }
- memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
- element->addr = page + page_offset;
- element->length = sizeof(struct qeth_hdr);
- ctx->offset += sizeof(struct qeth_hdr);
- page_offset += sizeof(struct qeth_hdr);
- /* add mac header (?) */
- if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
- element->length += ETH_HLEN;
- ctx->offset += ETH_HLEN;
- page_offset += ETH_HLEN;
- }
- /* add VLAN tag */
- if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
- memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
- element->length += VLAN_HLEN;
- ctx->offset += VLAN_HLEN;
- page_offset += VLAN_HLEN;
- }
- /* add network header */
- memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
- element->length += eddp->nhl;
- eddp->nh_in_ctx = page + page_offset;
- ctx->offset += eddp->nhl;
- page_offset += eddp->nhl;
- /* add transport header */
- memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
- element->length += eddp->thl;
- eddp->th_in_ctx = page + page_offset;
- ctx->offset += eddp->thl;
-}
-
-static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
- int len, __wsum *hcsum)
-{
- struct skb_frag_struct *frag;
- int left_in_frag;
- int copy_len;
- u8 *src;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
- if (skb_shinfo(eddp->skb)->nr_frags == 0) {
- skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
- dst, len);
- *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
- *hcsum);
- eddp->skb_offset += len;
- } else {
- while (len > 0) {
- if (eddp->frag < 0) {
- /* we're in skb->data */
- left_in_frag = (eddp->skb->len -
- eddp->skb->data_len)
- - eddp->skb_offset;
- src = eddp->skb->data + eddp->skb_offset;
- } else {
- frag = &skb_shinfo(eddp->skb)->frags[
- eddp->frag];
- left_in_frag = frag->size - eddp->frag_offset;
- src = (u8 *)((page_to_pfn(frag->page) <<
- PAGE_SHIFT) + frag->page_offset +
- eddp->frag_offset);
- }
- if (left_in_frag <= 0) {
- eddp->frag++;
- eddp->frag_offset = 0;
- continue;
- }
- copy_len = min(left_in_frag, len);
- memcpy(dst, src, copy_len);
- *hcsum = csum_partial(src, copy_len, *hcsum);
- dst += copy_len;
- eddp->frag_offset += copy_len;
- eddp->skb_offset += copy_len;
- len -= copy_len;
- }
- }
-}
-
-static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
- struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
-{
- u8 *page;
- int page_remainder;
- int page_offset;
- struct qeth_eddp_element *element;
- int first_lap = 1;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
- page = ctx->pages[ctx->offset >> PAGE_SHIFT];
- page_offset = ctx->offset % PAGE_SIZE;
- element = &ctx->elements[ctx->num_elements];
- while (data_len) {
- page_remainder = PAGE_SIZE - page_offset;
- if (page_remainder < data_len) {
- qeth_eddp_copy_data_tcp(page + page_offset, eddp,
- page_remainder, &hcsum);
- element->length += page_remainder;
- if (first_lap)
- element->flags = SBAL_FLAGS_FIRST_FRAG;
- else
- element->flags = SBAL_FLAGS_MIDDLE_FRAG;
- ctx->num_elements++;
- element++;
- data_len -= page_remainder;
- ctx->offset += page_remainder;
- page = ctx->pages[ctx->offset >> PAGE_SHIFT];
- page_offset = 0;
- element->addr = page + page_offset;
- } else {
- qeth_eddp_copy_data_tcp(page + page_offset, eddp,
- data_len, &hcsum);
- element->length += data_len;
- if (!first_lap)
- element->flags = SBAL_FLAGS_LAST_FRAG;
- ctx->num_elements++;
- ctx->offset += data_len;
- data_len = 0;
- }
- first_lap = 0;
- }
- ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
-}
-
-static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
- int data_len)
-{
- __wsum phcsum; /* pseudo header checksum */
-
- QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
- eddp->th.tcp.h.check = 0;
- /* compute pseudo header checksum */
- phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
- eddp->thl + data_len, IPPROTO_TCP, 0);
- /* compute checksum of tcp header */
- return csum_partial(&eddp->th, eddp->thl, phcsum);
-}
-
-static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
- int data_len)
-{
- __be32 proto;
- __wsum phcsum; /* pseudo header checksum */
-
- QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
- eddp->th.tcp.h.check = 0;
- /* compute pseudo header checksum */
- phcsum = csum_partial(&eddp->nh.ip6.h.saddr,
- sizeof(struct in6_addr), 0);
- phcsum = csum_partial(&eddp->nh.ip6.h.daddr,
- sizeof(struct in6_addr), phcsum);
- proto = htonl(IPPROTO_TCP);
- phcsum = csum_partial(&proto, sizeof(u32), phcsum);
- return phcsum;
-}
-
-static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
- u8 *nh, u8 nhl, u8 *th, u8 thl)
-{
- struct qeth_eddp_data *eddp;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
- eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
- if (eddp) {
- eddp->nhl = nhl;
- eddp->thl = thl;
- memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
- memcpy(&eddp->nh, nh, nhl);
- memcpy(&eddp->th, th, thl);
- eddp->frag = -1; /* initially we're in skb->data */
- }
- return eddp;
-}
-
-static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
- struct qeth_eddp_data *eddp)
-{
- struct tcphdr *tcph;
- int data_len;
- __wsum hcsum;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
- eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
- if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- eddp->skb_offset += sizeof(struct ethhdr);
- if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
- eddp->skb_offset += VLAN_HLEN;
- }
- tcph = tcp_hdr(eddp->skb);
- while (eddp->skb_offset < eddp->skb->len) {
- data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
- (int)(eddp->skb->len - eddp->skb_offset));
- /* prepare qdio hdr */
- if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
- eddp->nhl + eddp->thl;
- if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
- eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
- } else
- eddp->qh.hdr.l3.length = data_len + eddp->nhl +
- eddp->thl;
- /* prepare ip hdr */
- if (eddp->skb->protocol == htons(ETH_P_IP)) {
- eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
- eddp->thl);
- eddp->nh.ip4.h.check = 0;
- eddp->nh.ip4.h.check =
- ip_fast_csum((u8 *)&eddp->nh.ip4.h,
- eddp->nh.ip4.h.ihl);
- } else
- eddp->nh.ip6.h.payload_len = htons(data_len +
- eddp->thl);
- /* prepare tcp hdr */
- if (data_len == (eddp->skb->len - eddp->skb_offset)) {
- /* last segment -> set FIN and PSH flags */
- eddp->th.tcp.h.fin = tcph->fin;
- eddp->th.tcp.h.psh = tcph->psh;
- }
- if (eddp->skb->protocol == htons(ETH_P_IP))
- hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
- else
- hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
- /* fill the next segment into the context */
- qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
- qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
- if (eddp->skb_offset >= eddp->skb->len)
- break;
- /* prepare headers for next round */
- if (eddp->skb->protocol == htons(ETH_P_IP))
- eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
- eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
- data_len);
- }
-}
-
-static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
- struct sk_buff *skb, struct qeth_hdr *qhdr)
-{
- struct qeth_eddp_data *eddp = NULL;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpficx");
- /* create our segmentation headers and copy original headers */
- if (skb->protocol == htons(ETH_P_IP))
- eddp = qeth_eddp_create_eddp_data(qhdr,
- skb_network_header(skb),
- ip_hdrlen(skb),
- skb_transport_header(skb),
- tcp_hdrlen(skb));
- else
- eddp = qeth_eddp_create_eddp_data(qhdr,
- skb_network_header(skb),
- sizeof(struct ipv6hdr),
- skb_transport_header(skb),
- tcp_hdrlen(skb));
-
- if (eddp == NULL) {
- QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
- return -ENOMEM;
- }
- if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- skb_set_mac_header(skb, sizeof(struct qeth_hdr));
- memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
- if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
- eddp->vlan[0] = skb->protocol;
- eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
- }
- }
- /* the next flags will only be set on the last segment */
- eddp->th.tcp.h.fin = 0;
- eddp->th.tcp.h.psh = 0;
- eddp->skb = skb;
- /* begin segmentation and fill context */
- __qeth_eddp_fill_context_tcp(ctx, eddp);
- kfree(eddp);
- return 0;
-}
-
-static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
- struct sk_buff *skb, int hdr_len)
-{
- int skbs_per_page;
-
- QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
- /* can we put multiple skbs in one page? */
- skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
- if (skbs_per_page > 1) {
- ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
- skbs_per_page + 1;
- ctx->elements_per_skb = 1;
- } else {
- /* no -> how many elements per skb? */
- ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
- PAGE_SIZE) >> PAGE_SHIFT;
- ctx->num_pages = ctx->elements_per_skb *
- (skb_shinfo(skb)->gso_segs + 1);
- }
- ctx->num_elements = ctx->elements_per_skb *
- (skb_shinfo(skb)->gso_segs + 1);
-}
-
-static struct qeth_eddp_context *qeth_eddp_create_context_generic(
- struct qeth_card *card, struct sk_buff *skb, int hdr_len)
-{
- struct qeth_eddp_context *ctx = NULL;
- u8 *addr;
- int i;
-
- QETH_DBF_TEXT(TRACE, 5, "creddpcg");
- /* create the context and allocate pages */
- ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
- if (ctx == NULL) {
- QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
- return NULL;
- }
- ctx->type = QETH_LARGE_SEND_EDDP;
- qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
- if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
- QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
- kfree(ctx);
- return NULL;
- }
- ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
- if (ctx->pages == NULL) {
- QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
- kfree(ctx);
- return NULL;
- }
- for (i = 0; i < ctx->num_pages; ++i) {
- addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
- if (addr == NULL) {
- QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
- ctx->num_pages = i;
- qeth_eddp_free_context(ctx);
- return NULL;
- }
- ctx->pages[i] = addr;
- }
- ctx->elements = kcalloc(ctx->num_elements,
- sizeof(struct qeth_eddp_element), GFP_ATOMIC);
- if (ctx->elements == NULL) {
- QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
- qeth_eddp_free_context(ctx);
- return NULL;
- }
- /* reset num_elements; will be incremented again in fill_buffer to
- * reflect number of actually used elements */
- ctx->num_elements = 0;
- return ctx;
-}
-
-static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
- struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *qhdr)
-{
- struct qeth_eddp_context *ctx = NULL;
-
- QETH_DBF_TEXT(TRACE, 5, "creddpct");
- if (skb->protocol == htons(ETH_P_IP))
- ctx = qeth_eddp_create_context_generic(card, skb,
- (sizeof(struct qeth_hdr) +
- ip_hdrlen(skb) +
- tcp_hdrlen(skb)));
- else if (skb->protocol == htons(ETH_P_IPV6))
- ctx = qeth_eddp_create_context_generic(card, skb,
- sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
- tcp_hdrlen(skb));
- else
- QETH_DBF_TEXT(TRACE, 2, "cetcpinv");
-
- if (ctx == NULL) {
- QETH_DBF_TEXT(TRACE, 2, "creddpnl");
- return NULL;
- }
- if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
- QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
- qeth_eddp_free_context(ctx);
- return NULL;
- }
- atomic_set(&ctx->refcnt, 1);
- return ctx;
-}
-
-struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
- struct sk_buff *skb, struct qeth_hdr *qhdr,
- unsigned char sk_protocol)
-{
- QETH_DBF_TEXT(TRACE, 5, "creddpc");
- switch (sk_protocol) {
- case IPPROTO_TCP:
- return qeth_eddp_create_context_tcp(card, skb, qhdr);
- default:
- QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
-
-void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
- struct sk_buff *skb)
-{
- struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
- struct tcphdr *tcph = tcp_hdr(skb);
- struct iphdr *iph = ip_hdr(skb);
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
- QETH_DBF_TEXT(TRACE, 5, "tsofhdr");
-
- /*fix header to TSO values ...*/
- hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
- /*set values which are fix for the first approach ...*/
- hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
- hdr->ext.imb_hdr_no = 1;
- hdr->ext.hdr_type = 1;
- hdr->ext.hdr_version = 1;
- hdr->ext.hdr_len = 28;
- /*insert non-fix values */
- hdr->ext.mss = skb_shinfo(skb)->gso_size;
- hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
- hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
- sizeof(struct qeth_hdr_tso));
- tcph->check = 0;
- if (skb->protocol == ETH_P_IPV6) {
- ip6h->payload_len = 0;
- tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- 0, IPPROTO_TCP, 0);
- } else {
- /*OSA want us to set these values ...*/
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
- iph->tot_len = 0;
- iph->check = 0;
- }
-}
-EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
-
-void qeth_tx_csum(struct sk_buff *skb)
-{
- int tlen;
- if (skb->protocol == htons(ETH_P_IP)) {
- tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- tcp_hdr(skb)->check = 0;
- tcp_hdr(skb)->check = csum_tcpudp_magic(
- ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
- tlen, ip_hdr(skb)->protocol,
- skb_checksum(skb, skb_transport_offset(skb),
- tlen, 0));
- break;
- case IPPROTO_UDP:
- udp_hdr(skb)->check = 0;
- udp_hdr(skb)->check = csum_tcpudp_magic(
- ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
- tlen, ip_hdr(skb)->protocol,
- skb_checksum(skb, skb_transport_offset(skb),
- tlen, 0));
- break;
- }
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- switch (ipv6_hdr(skb)->nexthdr) {
- case IPPROTO_TCP:
- tcp_hdr(skb)->check = 0;
- tcp_hdr(skb)->check = csum_ipv6_magic(
- &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
- ipv6_hdr(skb)->payload_len,
- ipv6_hdr(skb)->nexthdr,
- skb_checksum(skb, skb_transport_offset(skb),
- ipv6_hdr(skb)->payload_len, 0));
- break;
- case IPPROTO_UDP:
- udp_hdr(skb)->check = 0;
- udp_hdr(skb)->check = csum_ipv6_magic(
- &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
- ipv6_hdr(skb)->payload_len,
- ipv6_hdr(skb)->nexthdr,
- skb_checksum(skb, skb_transport_offset(skb),
- ipv6_hdr(skb)->payload_len, 0));
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(qeth_tx_csum);
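[Editor's note] The per-segment TCP checksum in the removed EDDP path above is built in three stages: a pseudo-header sum (qeth_eddp_check_tcp4_hdr), an incremental sum over the copied header and payload bytes (qeth_eddp_copy_data_tcp), and a final fold written into the segment's TCP header (qeth_eddp_create_segment_data_tcp). Below is a minimal userspace model of that arithmetic only; it does not use the kernel's csum_tcpudp_nofold/csum_partial/csum_fold helpers, and the addresses and payload are made-up example values.

/*
 * Illustrative model of the checksum split used by the removed EDDP
 * code: pseudo-header sum, incremental data sum, final one's-complement
 * fold. Plain C, not the arch-optimized kernel helpers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* accumulate a byte buffer into a 32-bit one's-complement sum */
static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;
}

/* fold the accumulator down to the final 16-bit checksum */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pseudo[12] = {	/* saddr, daddr, zero, proto=TCP, tcp len=24 */
		192, 0, 2, 1,  192, 0, 2, 2,  0, 6, 0, 24 };
	uint8_t tcp_segment[24] = { 0 };	/* 20-byte header + 4 data bytes */
	uint32_t sum;

	tcp_segment[12] = 5 << 4;		/* data offset = 5 words */
	memcpy(tcp_segment + 20, "ping", 4);

	sum = csum_add(0, pseudo, sizeof(pseudo));		/* pseudo header */
	sum = csum_add(sum, tcp_segment, sizeof(tcp_segment));	/* hdr + data   */
	printf("tcp checksum: 0x%04x\n", (unsigned)csum_fold16(sum));
	return 0;
}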
diff --git a/drivers/s390/net/qeth_core_offl.h b/drivers/s390/net/qeth_core_offl.h
deleted file mode 100644
index 86bf7df8cf16..000000000000
--- a/drivers/s390/net/qeth_core_offl.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * drivers/s390/net/qeth_core_offl.h
- *
- * Copyright IBM Corp. 2007
- * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
- * Frank Blaschka <frank.blaschka@de.ibm.com>
- */
-
-#ifndef __QETH_CORE_OFFL_H__
-#define __QETH_CORE_OFFL_H__
-
-struct qeth_eddp_element {
- u32 flags;
- u32 length;
- void *addr;
-};
-
-struct qeth_eddp_context {
- atomic_t refcnt;
- enum qeth_large_send_types type;
- int num_pages; /* # of allocated pages */
- u8 **pages; /* pointers to pages */
- int offset; /* offset in ctx during creation */
- int num_elements; /* # of required 'SBALEs' */
- struct qeth_eddp_element *elements; /* array of 'SBALEs' */
- int elements_per_skb; /* # of 'SBALEs' per skb **/
-};
-
-struct qeth_eddp_context_reference {
- struct list_head list;
- struct qeth_eddp_context *ctx;
-};
-
-struct qeth_eddp_data {
- struct qeth_hdr qh;
- struct ethhdr mac;
- __be16 vlan[2];
- union {
- struct {
- struct iphdr h;
- u8 options[40];
- } ip4;
- struct {
- struct ipv6hdr h;
- } ip6;
- } nh;
- u8 nhl;
- void *nh_in_ctx; /* address of nh within the ctx */
- union {
- struct {
- struct tcphdr h;
- u8 options[40];
- } tcp;
- } th;
- u8 thl;
- void *th_in_ctx; /* address of th within the ctx */
- struct sk_buff *skb;
- int skb_offset;
- int frag;
- int frag_offset;
-} __attribute__ ((packed));
-
-extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *,
- struct sk_buff *, struct qeth_hdr *, unsigned char);
-extern void qeth_eddp_put_context(struct qeth_eddp_context *);
-extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,
- struct qeth_eddp_context *, int);
-extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
-extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
- struct qeth_eddp_context *);
-
-void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *,
- struct sk_buff *);
-void qeth_tx_csum(struct sk_buff *skb);
-
-#endif /* __QETH_CORE_EDDP_H__ */
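[Editor's note] The num_pages, elements_per_skb and num_elements fields of the context structure removed above were sized by qeth_eddp_calc_num_pages() in the companion source file: either several MSS-plus-header segments share one page and each needs a single buffer element, or one segment spans several elements and pages. A standalone sketch of that sizing follows; PAGE_SIZE, the MSS and the header length are chosen only for illustration.

/*
 * Model of the removed qeth_eddp_calc_num_pages() sizing logic.
 * Values are example assumptions, not driver constants.
 */
#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_SHIFT	12

struct eddp_sizing {
	int num_pages;
	int elements_per_skb;
	int num_elements;
};

static void calc_num_pages(struct eddp_sizing *s, int gso_size,
			   int gso_segs, int hdr_len)
{
	int skbs_per_page = PAGE_SIZE / (gso_size + hdr_len);

	if (skbs_per_page > 1) {
		/* several segments fit into one page */
		s->num_pages = (gso_segs + 1) / skbs_per_page + 1;
		s->elements_per_skb = 1;
	} else {
		/* one segment spans several elements/pages */
		s->elements_per_skb =
			(gso_size + hdr_len + PAGE_SIZE) >> PAGE_SHIFT;
		s->num_pages = s->elements_per_skb * (gso_segs + 1);
	}
	s->num_elements = s->elements_per_skb * (gso_segs + 1);
}

int main(void)
{
	struct eddp_sizing s;

	calc_num_pages(&s, 1460, 4, 66);	/* 4 MSS-sized segments */
	printf("pages=%d elems/skb=%d elems=%d\n",
	       s.num_pages, s.elements_per_skb, s.num_elements);
	return 0;
}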
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index c26e842ad905..568465d7517f 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -427,8 +427,6 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
switch (card->options.large_send) {
case QETH_LARGE_SEND_NO:
return sprintf(buf, "%s\n", "no");
- case QETH_LARGE_SEND_EDDP:
- return sprintf(buf, "%s\n", "EDDP");
case QETH_LARGE_SEND_TSO:
return sprintf(buf, "%s\n", "TSO");
default:
@@ -449,8 +447,6 @@ static ssize_t qeth_dev_large_send_store(struct device *dev,
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "no")) {
type = QETH_LARGE_SEND_NO;
- } else if (!strcmp(tmp, "EDDP")) {
- type = QETH_LARGE_SEND_EDDP;
} else if (!strcmp(tmp, "TSO")) {
type = QETH_LARGE_SEND_TSO;
} else {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 07ab8a5c1c46..172031baedc1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,7 +21,6 @@
#include <linux/ip.h>
#include "qeth_core.h"
-#include "qeth_core_offl.h"
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
@@ -328,6 +327,10 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id;
QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_DBF_TEXT(TRACE, 3, "aidREC");
+ return;
+ }
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
id->vid = vid;
@@ -344,6 +347,10 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ return;
+ }
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
if (id->vid == vid) {
@@ -379,7 +386,8 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
dev_close(card->dev);
rtnl_unlock();
}
- if (!card->use_hard_stop) {
+ if (!card->use_hard_stop ||
+ recovery_mode) {
__u8 *mac = &card->dev->dev_addr[0];
rc = qeth_l2_send_delmac(card, mac);
QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
@@ -388,7 +396,8 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
}
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l2_process_vlans(card, 1);
- if (!card->use_hard_stop)
+ if (!card->use_hard_stop ||
+ recovery_mode)
qeth_l2_del_all_mc(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
@@ -593,6 +602,10 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
}
QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_DBF_TEXT(TRACE, 3, "setmcREC");
+ return -ERESTARTSYS;
+ }
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
if (!rc)
rc = qeth_l2_send_setmac(card, addr->sa_data);
@@ -608,6 +621,9 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
return ;
QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
+ (card->state != CARD_STATE_UP))
+ return;
qeth_l2_del_all_mc(card);
spin_lock_bh(&card->mclock);
for (dm = dev->mc_list; dm; dm = dm->next)
@@ -634,8 +650,6 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct qeth_qdio_out_q *queue = card->qdio.out_qs
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
int tx_bytes = skb->len;
- enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
- struct qeth_eddp_context *ctx = NULL;
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
@@ -655,14 +669,10 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
netif_stop_queue(dev);
- if (skb_is_gso(skb))
- large_send = QETH_LARGE_SEND_EDDP;
-
if (card->info.type == QETH_CARD_TYPE_OSN)
hdr = (struct qeth_hdr *)skb->data;
else {
- if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
- (skb_shinfo(skb)->nr_frags == 0)) {
+ if (card->info.type == QETH_CARD_TYPE_IQD) {
new_skb = skb;
data_offset = ETH_HLEN;
hd_len = ETH_HLEN;
@@ -689,59 +699,26 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (large_send == QETH_LARGE_SEND_EDDP) {
- ctx = qeth_eddp_create_context(card, new_skb, hdr,
- skb->sk->sk_protocol);
- if (ctx == NULL) {
- QETH_DBF_MESSAGE(2, "could not create eddp context\n");
- goto tx_drop;
- }
- } else {
- elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
+ elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
elements_needed);
- if (!elements) {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
- goto tx_drop;
- }
+ if (!elements) {
+ if (data_offset >= 0)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ goto tx_drop;
}
- if ((large_send == QETH_LARGE_SEND_NO) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
- qeth_tx_csum(new_skb);
-
if (card->info.type != QETH_CARD_TYPE_IQD)
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
- elements, ctx);
+ elements);
else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
- elements, ctx, data_offset, hd_len);
+ elements, data_offset, hd_len);
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
if (new_skb != skb)
dev_kfree_skb_any(skb);
- if (card->options.performance_stats) {
- if (large_send != QETH_LARGE_SEND_NO) {
- card->perf_stats.large_send_bytes += tx_bytes;
- card->perf_stats.large_send_cnt++;
- }
- if (skb_shinfo(new_skb)->nr_frags > 0) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent +=
- skb_shinfo(new_skb)->nr_frags + 1;
- }
- }
-
- if (ctx != NULL) {
- qeth_eddp_put_context(ctx);
- dev_kfree_skb_any(new_skb);
- }
} else {
- if (ctx != NULL)
- qeth_eddp_put_context(ctx);
-
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
@@ -878,30 +855,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
return;
}
-static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
-{
- struct qeth_card *card = dev->ml_priv;
-
- if (data) {
- if (card->options.large_send == QETH_LARGE_SEND_NO) {
- card->options.large_send = QETH_LARGE_SEND_EDDP;
- dev->features |= NETIF_F_TSO;
- }
- } else {
- dev->features &= ~NETIF_F_TSO;
- card->options.large_send = QETH_LARGE_SEND_NO;
- }
- return 0;
-}
-
static struct ethtool_ops qeth_l2_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = qeth_l2_ethtool_set_tso,
.get_strings = qeth_core_get_strings,
.get_ethtool_stats = qeth_core_get_ethtool_stats,
.get_stats_count = qeth_core_get_stats_count,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3d04920b9bb9..0ba3817cb6a7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -19,15 +19,15 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
-#include <linux/reboot.h>
+#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <net/ip.h>
#include <net/arp.h>
+#include <net/ip6_checksum.h>
#include "qeth_l3.h"
-#include "qeth_core_offl.h"
static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_recover(void *);
@@ -1038,7 +1038,7 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
rc = qeth_query_setadapterparms(card);
if (rc) {
QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
- "0x%x\n", card->gdev->dev.bus_id, rc);
+ "0x%x\n", dev_name(&card->gdev->dev), rc);
return rc;
}
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
@@ -1838,6 +1838,10 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
unsigned long flags;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ return;
+ }
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_l3_free_vlan_addresses(card, vid);
@@ -2101,6 +2105,9 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
+ (card->state != CARD_STATE_UP))
+ return;
qeth_l3_delete_mc_addresses(card);
qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
@@ -2577,12 +2584,63 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
+static void qeth_tso_fill_header(struct qeth_card *card,
+ struct qeth_hdr *qhdr, struct sk_buff *skb)
+{
+ struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
+ struct tcphdr *tcph = tcp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /*fix header to TSO values ...*/
+ hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+ /*set values which are fix for the first approach ...*/
+ hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+ hdr->ext.imb_hdr_no = 1;
+ hdr->ext.hdr_type = 1;
+ hdr->ext.hdr_version = 1;
+ hdr->ext.hdr_len = 28;
+ /*insert non-fix values */
+ hdr->ext.mss = skb_shinfo(skb)->gso_size;
+ hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+ hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+ sizeof(struct qeth_hdr_tso));
+ tcph->check = 0;
+ if (skb->protocol == ETH_P_IPV6) {
+ ip6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else {
+ /*OSA want us to set these values ...*/
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ iph->tot_len = 0;
+ iph->check = 0;
+ }
+}
+
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+ __wsum csum;
+ int offset;
+
+ skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
+ offset = skb->csum_start - skb_headroom(skb);
+ BUG_ON(offset >= skb_headlen(skb));
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+ offset += skb->csum_offset;
+ BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+}
+
static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int rc;
u16 *tag;
struct qeth_hdr *hdr = NULL;
int elements_needed = 0;
+ int elems;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
@@ -2591,8 +2649,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
int tx_bytes = skb->len;
enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
- struct qeth_eddp_context *ctx = NULL;
int data_offset = -1;
+ int nr_frags;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
(skb->protocol != htons(ETH_P_IPV6)) &&
@@ -2615,6 +2673,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_is_gso(skb))
large_send = card->options.large_send;
+ else
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
@@ -2661,12 +2725,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* fix hardware limitation: as long as we do not have sbal
- * chaining we can not send long frag lists so we temporary
- * switch to EDDP
+ * chaining we can not send long frag lists
*/
if ((large_send == QETH_LARGE_SEND_TSO) &&
- ((skb_shinfo(new_skb)->nr_frags + 2) > 16))
- large_send = QETH_LARGE_SEND_EDDP;
+ ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
+ if (skb_linearize(new_skb))
+ goto tx_drop;
+ }
if ((large_send == QETH_LARGE_SEND_TSO) &&
(cast_type == RTN_UNSPEC)) {
@@ -2689,37 +2754,22 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (large_send == QETH_LARGE_SEND_EDDP) {
- /* new_skb is not owned by a socket so we use skb to get
- * the protocol
- */
- ctx = qeth_eddp_create_context(card, new_skb, hdr,
- skb->sk->sk_protocol);
- if (ctx == NULL) {
- QETH_DBF_MESSAGE(2, "could not create eddp context\n");
- goto tx_drop;
- }
- } else {
- int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
+ elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
elements_needed);
- if (!elems) {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
- goto tx_drop;
- }
- elements_needed += elems;
+ if (!elems) {
+ if (data_offset >= 0)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ goto tx_drop;
}
-
- if ((large_send == QETH_LARGE_SEND_NO) &&
- (new_skb->ip_summed == CHECKSUM_PARTIAL))
- qeth_tx_csum(new_skb);
+ elements_needed += elems;
+ nr_frags = skb_shinfo(new_skb)->nr_frags;
if (card->info.type != QETH_CARD_TYPE_IQD)
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
- elements_needed, ctx);
+ elements_needed);
else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
- elements_needed, ctx, data_offset, 0);
+ elements_needed, data_offset, 0);
if (!rc) {
card->stats.tx_packets++;
@@ -2731,22 +2781,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;
}
- if (skb_shinfo(new_skb)->nr_frags > 0) {
+ if (nr_frags) {
card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent +=
- skb_shinfo(new_skb)->nr_frags + 1;
+ card->perf_stats.sg_frags_sent += nr_frags + 1;
}
}
-
- if (ctx != NULL) {
- qeth_eddp_put_context(ctx);
- dev_kfree_skb_any(new_skb);
- }
} else {
- if (ctx != NULL)
- qeth_eddp_put_context(ctx);
-
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
@@ -2841,7 +2882,7 @@ static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
if (data) {
if (card->options.large_send == QETH_LARGE_SEND_NO) {
if (card->info.type == QETH_CARD_TYPE_IQD)
- card->options.large_send = QETH_LARGE_SEND_EDDP;
+ return -EPERM;
else
card->options.large_send = QETH_LARGE_SEND_TSO;
dev->features |= NETIF_F_TSO;
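[Editor's note] The qeth_tso_fill_header() made local to this file above derives its variable TSO fields from the skb alone: dg_hdr_len is the IP plus TCP header length, and payload_len is skb->len minus those headers and the TSO extension header. The arithmetic is modelled below; the lengths used are plausible example values, not values taken from the driver.

/*
 * Hedged sketch of the header-length arithmetic in
 * qeth_tso_fill_header(). Struct and field names are simplified
 * stand-ins for the real qeth_hdr_ext_tso layout.
 */
#include <stdint.h>
#include <stdio.h>

struct tso_ext_model {
	uint16_t hdr_tot_len;
	uint16_t mss;
	uint16_t dg_hdr_len;
	uint16_t payload_len;
};

int main(void)
{
	unsigned int skb_len = 9000;	/* assumed GSO super-packet length   */
	unsigned int ihl = 5;		/* IPv4 header length, 32-bit words   */
	unsigned int doff = 8;		/* TCP data offset, 32-bit words      */
	unsigned int tso_hdr_size = 32;	/* assumed size of the TSO qeth hdr   */
	struct tso_ext_model ext;

	ext.mss = 1460;
	ext.dg_hdr_len = ihl * 4 + doff * 4;		/* 20 + 32 = 52 */
	ext.payload_len = skb_len - ext.dg_hdr_len - tso_hdr_size;
	ext.hdr_tot_len = sizeof(struct tso_ext_model);	/* model only  */

	printf("dg_hdr_len=%u payload_len=%u\n",
	       (unsigned)ext.dg_hdr_len, (unsigned)ext.payload_len);
	return 0;
}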
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
deleted file mode 100644
index 92b0417f8e12..000000000000
--- a/drivers/s390/s390mach.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * drivers/s390/s390mach.c
- * S/390 machine check handler
- *
- * Copyright IBM Corp. 2000,2008
- * Author(s): Ingo Adlung (adlung@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- * Cornelia Huck <cornelia.huck@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/workqueue.h>
-#include <linux/time.h>
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <asm/etr.h>
-#include <asm/lowcore.h>
-#include <asm/cio.h>
-#include <asm/cpu.h>
-#include "s390mach.h"
-
-static struct semaphore m_sem;
-
-static NORET_TYPE void
-s390_handle_damage(char *msg)
-{
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
- disabled_wait((unsigned long) __builtin_return_address(0));
- for(;;);
-}
-
-static crw_handler_t crw_handlers[NR_RSCS];
-
-/**
- * s390_register_crw_handler() - register a channel report word handler
- * @rsc: reporting source code to handle
- * @handler: handler to be registered
- *
- * Returns %0 on success and a negative error value otherwise.
- */
-int s390_register_crw_handler(int rsc, crw_handler_t handler)
-{
- if ((rsc < 0) || (rsc >= NR_RSCS))
- return -EINVAL;
- if (!cmpxchg(&crw_handlers[rsc], NULL, handler))
- return 0;
- return -EBUSY;
-}
-
-/**
- * s390_unregister_crw_handler() - unregister a channel report word handler
- * @rsc: reporting source code to handle
- */
-void s390_unregister_crw_handler(int rsc)
-{
- if ((rsc < 0) || (rsc >= NR_RSCS))
- return;
- xchg(&crw_handlers[rsc], NULL);
- synchronize_sched();
-}
-
-/*
- * Retrieve CRWs and call function to handle event.
- */
-static int s390_collect_crw_info(void *param)
-{
- struct crw crw[2];
- int ccode;
- struct semaphore *sem;
- unsigned int chain;
- int ignore;
-
- sem = (struct semaphore *)param;
-repeat:
- ignore = down_interruptible(sem);
- chain = 0;
- while (1) {
- if (unlikely(chain > 1)) {
- struct crw tmp_crw;
-
- printk(KERN_WARNING"%s: Code does not support more "
- "than two chained crws; please report to "
- "linux390@de.ibm.com!\n", __func__);
- ccode = stcrw(&tmp_crw);
- printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
- "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
- __func__, tmp_crw.slct, tmp_crw.oflw,
- tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
- tmp_crw.erc, tmp_crw.rsid);
- printk(KERN_WARNING"%s: This was crw number %x in the "
- "chain\n", __func__, chain);
- if (ccode != 0)
- break;
- chain = tmp_crw.chn ? chain + 1 : 0;
- continue;
- }
- ccode = stcrw(&crw[chain]);
- if (ccode != 0)
- break;
- printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
- "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
- crw[chain].slct, crw[chain].oflw, crw[chain].chn,
- crw[chain].rsc, crw[chain].anc, crw[chain].erc,
- crw[chain].rsid);
- /* Check for overflows. */
- if (crw[chain].oflw) {
- int i;
-
- pr_debug("%s: crw overflow detected!\n", __func__);
- for (i = 0; i < NR_RSCS; i++) {
- if (crw_handlers[i])
- crw_handlers[i](NULL, NULL, 1);
- }
- chain = 0;
- continue;
- }
- if (crw[0].chn && !chain) {
- chain++;
- continue;
- }
- if (crw_handlers[crw[chain].rsc])
- crw_handlers[crw[chain].rsc](&crw[0],
- chain ? &crw[1] : NULL,
- 0);
- /* chain is always 0 or 1 here. */
- chain = crw[chain].chn ? chain + 1 : 0;
- }
- goto repeat;
- return 0;
-}
-
-struct mcck_struct {
- int kill_task;
- int channel_report;
- int warning;
- unsigned long long mcck_code;
-};
-
-static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
-
-/*
- * Main machine check handler function. Will be called with interrupts enabled
- * or disabled and machine checks enabled or disabled.
- */
-void
-s390_handle_mcck(void)
-{
- unsigned long flags;
- struct mcck_struct mcck;
-
- /*
- * Disable machine checks and get the current state of accumulated
- * machine checks. Afterwards delete the old state and enable machine
- * checks again.
- */
- local_irq_save(flags);
- local_mcck_disable();
- mcck = __get_cpu_var(cpu_mcck);
- memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
- clear_thread_flag(TIF_MCCK_PENDING);
- local_mcck_enable();
- local_irq_restore(flags);
-
- if (mcck.channel_report)
- up(&m_sem);
-
-#ifdef CONFIG_MACHCHK_WARNING
-/*
- * The warning may remain for a prolonged period on the bare iron.
- * (actually till the machine is powered off, or until the problem is gone)
- * So we just stop listening for the WARNING MCH and prevent continuously
- * being interrupted. One caveat is however, that we must do this per
- * processor and cannot use the smp version of ctl_clear_bit().
- * On VM we only get one interrupt per virtually presented machine check.
- * Though one suffices, we may get one interrupt per (virtual) processor.
- */
- if (mcck.warning) { /* WARNING pending ? */
- static int mchchk_wng_posted = 0;
- /*
- * Use single machine clear, as we cannot handle smp right now
- */
- __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
- if (xchg(&mchchk_wng_posted, 1) == 0)
- kill_cad_pid(SIGPWR, 1);
- }
-#endif
-
- if (mcck.kill_task) {
- local_irq_enable();
- printk(KERN_EMERG "mcck: Terminating task because of machine "
- "malfunction (code 0x%016llx).\n", mcck.mcck_code);
- printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
- current->comm, current->pid);
- do_exit(SIGSEGV);
- }
-}
-EXPORT_SYMBOL_GPL(s390_handle_mcck);
-
-/*
- * returns 0 if all registers could be validated
- * returns 1 otherwise
- */
-static int
-s390_revalidate_registers(struct mci *mci)
-{
- int kill_task;
- u64 tmpclock;
- u64 zero;
- void *fpt_save_area, *fpt_creg_save_area;
-
- kill_task = 0;
- zero = 0;
- /* General purpose registers */
- if (!mci->gr)
- /*
- * General purpose registers couldn't be restored and have
- * unknown contents. Process needs to be terminated.
- */
- kill_task = 1;
-
- /* Revalidate floating point registers */
- if (!mci->fp)
- /*
- * Floating point registers can't be restored and
- * therefore the process needs to be terminated.
- */
- kill_task = 1;
-
-#ifndef CONFIG_64BIT
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 2,8(%0)\n"
- " ld 4,16(%0)\n"
- " ld 6,24(%0)"
- : : "a" (&S390_lowcore.floating_pt_save_area));
-#endif
-
- if (MACHINE_HAS_IEEE) {
-#ifdef CONFIG_64BIT
- fpt_save_area = &S390_lowcore.floating_pt_save_area;
- fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
-#else
- fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
- fpt_creg_save_area = fpt_save_area+128;
-#endif
- /* Floating point control register */
- if (!mci->fc) {
- /*
- * Floating point control register can't be restored.
- * Task will be terminated.
- */
- asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
- kill_task = 1;
-
- } else
- asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
-
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
- }
-
- /* Revalidate access registers */
- asm volatile(
- " lam 0,15,0(%0)"
- : : "a" (&S390_lowcore.access_regs_save_area));
- if (!mci->ar)
- /*
- * Access registers have unknown contents.
- * Terminating task.
- */
- kill_task = 1;
-
- /* Revalidate control registers */
- if (!mci->cr)
- /*
- * Control registers have unknown contents.
- * Can't recover and therefore stopping machine.
- */
- s390_handle_damage("invalid control registers.");
- else
-#ifdef CONFIG_64BIT
- asm volatile(
- " lctlg 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
-#else
- asm volatile(
- " lctl 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
-#endif
-
- /*
- * We don't even try to revalidate the TOD register, since we simply
- * can't write something sensible into that register.
- */
-
-#ifdef CONFIG_64BIT
- /*
- * See if we can revalidate the TOD programmable register with its
- * old contents (should be zero) otherwise set it to zero.
- */
- if (!mci->pr)
- asm volatile(
- " sr 0,0\n"
- " sckpf"
- : : : "0", "cc");
- else
- asm volatile(
- " l 0,0(%0)\n"
- " sckpf"
- : : "a" (&S390_lowcore.tod_progreg_save_area)
- : "0", "cc");
-#endif
-
- /* Revalidate clock comparator register */
- asm volatile(
- " stck 0(%1)\n"
- " sckc 0(%1)"
- : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
-
- /* Check if old PSW is valid */
- if (!mci->wp)
- /*
- * Can't tell if we come from user or kernel mode
- * -> stopping machine.
- */
- s390_handle_damage("old psw invalid.");
-
- if (!mci->ms || !mci->pm || !mci->ia)
- kill_task = 1;
-
- return kill_task;
-}
-
-#define MAX_IPD_COUNT 29
-#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
-
-/*
- * machine check handler.
- */
-void
-s390_do_machine_check(struct pt_regs *regs)
-{
- static DEFINE_SPINLOCK(ipd_lock);
- static unsigned long long last_ipd;
- static int ipd_count;
- unsigned long long tmp;
- struct mci *mci;
- struct mcck_struct *mcck;
- int umode;
-
- lockdep_off();
-
- s390_idle_check();
-
- mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
- mcck = &__get_cpu_var(cpu_mcck);
- umode = user_mode(regs);
-
- if (mci->sd)
- /* System damage -> stopping machine */
- s390_handle_damage("received system damage machine check.");
-
- if (mci->pd) {
- if (mci->b) {
- /* Processing backup -> verify if we can survive this */
- u64 z_mcic, o_mcic, t_mcic;
-#ifdef CONFIG_64BIT
- z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
- o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
- 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
- 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
- 1ULL<<16);
-#else
- z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
- 1ULL<<29);
- o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
- 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
- 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
-#endif
- t_mcic = *(u64 *)mci;
-
- if (((t_mcic & z_mcic) != 0) ||
- ((t_mcic & o_mcic) != o_mcic)) {
- s390_handle_damage("processing backup machine "
- "check with damage.");
- }
-
- /*
- * Nullifying exigent condition, therefore we might
- * retry this instruction.
- */
-
- spin_lock(&ipd_lock);
-
- tmp = get_clock();
-
- if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
- ipd_count++;
- else
- ipd_count = 1;
-
- last_ipd = tmp;
-
- if (ipd_count == MAX_IPD_COUNT)
- s390_handle_damage("too many ipd retries.");
-
- spin_unlock(&ipd_lock);
- }
- else {
- /* Processing damage -> stopping machine */
- s390_handle_damage("received instruction processing "
- "damage machine check.");
- }
- }
- if (s390_revalidate_registers(mci)) {
- if (umode) {
- /*
- * Couldn't restore all register contents while in
- * user mode -> mark task for termination.
- */
- mcck->kill_task = 1;
- mcck->mcck_code = *(unsigned long long *) mci;
- set_thread_flag(TIF_MCCK_PENDING);
- }
- else
- /*
- * Couldn't restore all register contents while in
- * kernel mode -> stopping machine.
- */
- s390_handle_damage("unable to revalidate registers.");
- }
-
- if (mci->cd) {
- /* Timing facility damage */
- s390_handle_damage("TOD clock damaged");
- }
-
- if (mci->ed && mci->ec) {
- /* External damage */
- if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
- etr_sync_check();
- if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
- etr_switch_to_local();
- if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
- stp_sync_check();
- if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
- stp_island_check();
- }
-
- if (mci->se)
- /* Storage error uncorrected */
- s390_handle_damage("received storage error uncorrected "
- "machine check.");
-
- if (mci->ke)
- /* Storage key-error uncorrected */
- s390_handle_damage("received storage key-error uncorrected "
- "machine check.");
-
- if (mci->ds && mci->fa)
- /* Storage degradation */
- s390_handle_damage("received storage degradation machine "
- "check.");
-
- if (mci->cp) {
- /* Channel report word pending */
- mcck->channel_report = 1;
- set_thread_flag(TIF_MCCK_PENDING);
- }
-
- if (mci->w) {
- /* Warning pending */
- mcck->warning = 1;
- set_thread_flag(TIF_MCCK_PENDING);
- }
- lockdep_on();
-}
-
-/*
- * s390_init_machine_check
- *
- * initialize machine check handling
- */
-static int
-machine_check_init(void)
-{
- init_MUTEX_LOCKED(&m_sem);
- ctl_set_bit(14, 25); /* enable external damage MCH */
- ctl_set_bit(14, 27); /* enable system recovery MCH */
-#ifdef CONFIG_MACHCHK_WARNING
- ctl_set_bit(14, 24); /* enable warning MCH */
-#endif
- return 0;
-}
-
-/*
- * Initialize the machine check handler really early to be able to
- * catch all machine checks that happen during boot
- */
-arch_initcall(machine_check_init);
-
-/*
- * Machine checks for the channel subsystem must be enabled
- * after the channel subsystem is initialized
- */
-static int __init
-machine_check_crw_init (void)
-{
- struct task_struct *task;
-
- task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
- if (IS_ERR(task))
- return PTR_ERR(task);
- ctl_set_bit(14, 28); /* enable channel report MCH */
- return 0;
-}
-
-device_initcall (machine_check_crw_init);
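[Editor's note] The instruction-processing-damage path in the file removed above retries the failing instruction but calls s390_handle_damage() once MAX_IPD_COUNT (29) backups occur with no more than MAX_IPD_TIME (five minutes) between consecutive events. A rough userspace model of that rate limit follows, using plain microseconds instead of TOD clock units.

/*
 * Minimal model of the IPD retry limit: the counter restarts whenever
 * two backup events are more than the window apart, and the machine
 * would be stopped once it reaches MAX_IPD_COUNT.
 */
#include <stdio.h>

#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5ULL * 60 * 1000000)	/* 5 minutes in usec */

static unsigned long long last_ipd;
static int ipd_count;

/* returns 1 if the machine should be stopped */
static int ipd_event(unsigned long long now_usec)
{
	if (now_usec - last_ipd < MAX_IPD_TIME)
		ipd_count++;
	else
		ipd_count = 1;
	last_ipd = now_usec;
	return ipd_count == MAX_IPD_COUNT;
}

int main(void)
{
	unsigned long long t = 0;
	int i;

	for (i = 0; i < 40; i++) {
		t += 1000000;	/* one simulated IPD per second */
		if (ipd_event(t)) {
			printf("would stop machine after %d retries\n", i + 1);
			break;
		}
	}
	return 0;
}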
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
deleted file mode 100644
index d39f8b697d27..000000000000
--- a/drivers/s390/s390mach.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * drivers/s390/s390mach.h
- * S/390 data definitions for machine check processing
- *
- * S390 version
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Ingo Adlung (adlung@de.ibm.com)
- */
-
-#ifndef __s390mach_h
-#define __s390mach_h
-
-#include <asm/types.h>
-
-struct mci {
- __u32 sd : 1; /* 00 system damage */
- __u32 pd : 1; /* 01 instruction-processing damage */
- __u32 sr : 1; /* 02 system recovery */
- __u32 to_be_defined_1 : 1; /* 03 */
- __u32 cd : 1; /* 04 timing-facility damage */
- __u32 ed : 1; /* 05 external damage */
- __u32 to_be_defined_2 : 1; /* 06 */
- __u32 dg : 1; /* 07 degradation */
- __u32 w : 1; /* 08 warning pending */
- __u32 cp : 1; /* 09 channel-report pending */
- __u32 sp : 1; /* 10 service-processor damage */
- __u32 ck : 1; /* 11 channel-subsystem damage */
- __u32 to_be_defined_3 : 2; /* 12-13 */
- __u32 b : 1; /* 14 backed up */
- __u32 to_be_defined_4 : 1; /* 15 */
- __u32 se : 1; /* 16 storage error uncorrected */
- __u32 sc : 1; /* 17 storage error corrected */
- __u32 ke : 1; /* 18 storage-key error uncorrected */
- __u32 ds : 1; /* 19 storage degradation */
- __u32 wp : 1; /* 20 psw mwp validity */
- __u32 ms : 1; /* 21 psw mask and key validity */
- __u32 pm : 1; /* 22 psw program mask and cc validity */
- __u32 ia : 1; /* 23 psw instruction address validity */
- __u32 fa : 1; /* 24 failing storage address validity */
- __u32 to_be_defined_5 : 1; /* 25 */
- __u32 ec : 1; /* 26 external damage code validity */
- __u32 fp : 1; /* 27 floating point register validity */
- __u32 gr : 1; /* 28 general register validity */
- __u32 cr : 1; /* 29 control register validity */
- __u32 to_be_defined_6 : 1; /* 30 */
- __u32 st : 1; /* 31 storage logical validity */
- __u32 ie : 1; /* 32 indirect storage error */
- __u32 ar : 1; /* 33 access register validity */
- __u32 da : 1; /* 34 delayed access exception */
- __u32 to_be_defined_7 : 7; /* 35-41 */
- __u32 pr : 1; /* 42 tod programmable register validity */
- __u32 fc : 1; /* 43 fp control register validity */
- __u32 ap : 1; /* 44 ancillary report */
- __u32 to_be_defined_8 : 1; /* 45 */
- __u32 ct : 1; /* 46 cpu timer validity */
- __u32 cc : 1; /* 47 clock comparator validity */
- __u32 to_be_defined_9 : 16; /* 47-63 */
-};
-
-/*
- * Channel Report Word
- */
-struct crw {
- __u32 res1 : 1; /* reserved zero */
- __u32 slct : 1; /* solicited */
- __u32 oflw : 1; /* overflow */
- __u32 chn : 1; /* chained */
- __u32 rsc : 4; /* reporting source code */
- __u32 anc : 1; /* ancillary report */
- __u32 res2 : 1; /* reserved zero */
- __u32 erc : 6; /* error-recovery code */
- __u32 rsid : 16; /* reporting-source ID */
-} __attribute__ ((packed));
-
-typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
-
-extern int s390_register_crw_handler(int rsc, crw_handler_t handler);
-extern void s390_unregister_crw_handler(int rsc);
-
-#define NR_RSCS 16
-
-#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
-#define CRW_RSC_SCH 0x3 /* subchannel */
-#define CRW_RSC_CPATH 0x4 /* channel path */
-#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */
-#define CRW_RSC_CSS 0xB /* channel subsystem */
-
-#define CRW_ERC_EVENT 0x00 /* event information pending */
-#define CRW_ERC_AVAIL 0x01 /* available */
-#define CRW_ERC_INIT 0x02 /* initialized */
-#define CRW_ERC_TERROR 0x03 /* temporary error */
-#define CRW_ERC_IPARM 0x04 /* installed parm initialized */
-#define CRW_ERC_TERM 0x05 /* terminal */
-#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */
-#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */
-#define CRW_ERC_PMOD 0x08 /* installed parameters modified */
-
-static inline int stcrw(struct crw *pcrw )
-{
- int ccode;
-
- __asm__ __volatile__(
- "stcrw 0(%2)\n\t"
- "ipm %0\n\t"
- "srl %0,28\n\t"
- : "=d" (ccode), "=m" (*pcrw)
- : "a" (pcrw)
- : "cc" );
- return ccode;
-}
-
-#define ED_ETR_SYNC 12 /* External damage ETR sync check */
-#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
-
-#define ED_STP_SYNC 7 /* External damage STP sync check */
-#define ED_STP_ISLAND 6 /* External damage STP island check */
-
-struct pt_regs;
-
-void s390_handle_mcck(void);
-void s390_do_machine_check(struct pt_regs *regs);
-#endif /* __s390mach */
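[Editor's note] s390_register_crw_handler() in the removed code kept one handler slot per reporting source code and claimed it with cmpxchg(), so a second registration for the same rsc fails with -EBUSY. A hedged userspace sketch of the same pattern, substituting GCC's __sync_val_compare_and_swap for the kernel primitive and a trivial handler for a real subchannel handler:

/*
 * Sketch of the rsc-indexed handler table with atomic, exclusive
 * registration. Userspace illustration only.
 */
#include <stdio.h>
#include <errno.h>

#define NR_RSCS 16

typedef void (*crw_handler_t)(void *crw0, void *crw1, int overflow);

static crw_handler_t crw_handlers[NR_RSCS];

static int register_crw_handler(int rsc, crw_handler_t handler)
{
	if (rsc < 0 || rsc >= NR_RSCS)
		return -EINVAL;
	/* claim the slot only if it is still empty */
	if (__sync_val_compare_and_swap(&crw_handlers[rsc],
					(crw_handler_t)0, handler) == 0)
		return 0;
	return -EBUSY;
}

static void sch_handler(void *crw0, void *crw1, int overflow)
{
	(void)crw0;
	(void)crw1;
	printf("subchannel CRW (overflow=%d)\n", overflow);
}

int main(void)
{
	int rc1 = register_crw_handler(0x3, sch_handler);	/* CRW_RSC_SCH    */
	int rc2 = register_crw_handler(0x3, sch_handler);	/* slot taken now */

	printf("first=%d second=%d\n", rc1, rc2);
	if (crw_handlers[0x3])
		crw_handlers[0x3](NULL, NULL, 0);
	return 0;
}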
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8af7dfbe022c..616c60ffcf2c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
/*
@@ -249,8 +249,8 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
struct zfcp_port *port;
list_for_each_entry(port, &adapter->port_list_head, list)
- if ((port->wwpn == wwpn) && !(atomic_read(&port->status) &
- (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE)))
+ if ((port->wwpn == wwpn) &&
+ !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE))
return port;
return NULL;
}
@@ -421,7 +421,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
while (atomic_read(&adapter->stat_miss) > 0)
if (zfcp_fsf_status_read(adapter)) {
if (atomic_read(&adapter->stat_miss) >= 16) {
- zfcp_erp_adapter_reopen(adapter, 0, 103, NULL);
+ zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
+ NULL);
return 1;
}
break;
@@ -501,6 +502,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
spin_lock_init(&adapter->scsi_dbf_lock);
spin_lock_init(&adapter->rec_dbf_lock);
spin_lock_init(&adapter->req_q_lock);
+ spin_lock_init(&adapter->qdio_stat_lock);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
@@ -522,7 +524,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
goto sysfs_failed;
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
- zfcp_fc_nameserver_init(adapter);
if (!zfcp_adapter_scsi_register(adapter))
return 0;
@@ -552,6 +553,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
+ cancel_delayed_work_sync(&adapter->nsp.work);
zfcp_adapter_scsi_unregister(adapter);
sysfs_remove_group(&adapter->ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
@@ -603,10 +605,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
init_waitqueue_head(&port->remove_wq);
INIT_LIST_HEAD(&port->unit_list_head);
INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
+ INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
+ INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
port->adapter = adapter;
port->d_id = d_id;
port->wwpn = wwpn;
+ port->rport_task = RPORT_NONE;
/* mark port unusable as long as sysfs registration is not complete */
atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
@@ -620,11 +625,10 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
dev_set_drvdata(&port->sysfs_device, port);
read_lock_irq(&zfcp_data.config_lock);
- if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
- if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
- read_unlock_irq(&zfcp_data.config_lock);
- goto err_out_free;
- }
+ if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
+ read_unlock_irq(&zfcp_data.config_lock);
+ goto err_out_free;
+ }
read_unlock_irq(&zfcp_data.config_lock);
if (device_register(&port->sysfs_device))
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 285881f07648..cfb0dcb6e3ff 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,7 +3,7 @@
*
* Registration and callback for the s390 common I/O layer.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
#define KMSG_COMPONENT "zfcp"
@@ -72,8 +72,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
list_for_each_entry_safe(port, p, &port_remove_lh, list) {
list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
- if (atomic_read(&unit->status) &
- ZFCP_STATUS_UNIT_REGISTERED)
+ if (unit->device)
scsi_remove_device(unit->device);
zfcp_unit_dequeue(unit);
}
@@ -109,11 +108,12 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter));
adapter->req_no = 0;
+ zfcp_fc_nameserver_init(adapter);
- zfcp_erp_modify_adapter_status(adapter, 10, NULL,
+ zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
- zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85,
- NULL);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "ccsonl2", NULL);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
flush_work(&adapter->scan_work);
@@ -137,7 +137,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(&ccw_device->dev);
- zfcp_erp_adapter_shutdown(adapter, 0, 86, NULL);
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
zfcp_erp_wait(adapter);
zfcp_erp_thread_kill(adapter);
up(&zfcp_data.config_sema);
@@ -160,21 +160,26 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
case CIO_GONE:
dev_warn(&adapter->ccw_device->dev,
"The FCP device has been detached\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
break;
case CIO_NO_PATH:
dev_warn(&adapter->ccw_device->dev,
"The CHPID for the FCP device is offline\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
break;
case CIO_OPER:
dev_info(&adapter->ccw_device->dev,
"The FCP device is operational again\n");
- zfcp_erp_modify_adapter_status(adapter, 11, NULL,
+ zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- 89, NULL);
+ "ccnoti4", NULL);
+ break;
+ case CIO_BOXED:
+ dev_warn(&adapter->ccw_device->dev,
+ "The ccw device did not respond in time.\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
break;
}
return 1;
@@ -190,7 +195,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(&cdev->dev);
- zfcp_erp_adapter_shutdown(adapter, 0, 90, NULL);
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index cb6df609953e..0a1a5dd8d018 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -490,172 +490,17 @@ static const char *zfcp_rec_dbf_tags[] = {
[ZFCP_REC_DBF_ID_ACTION] = "action",
};
-static const char *zfcp_rec_dbf_ids[] = {
- [1] = "new",
- [2] = "ready",
- [3] = "kill",
- [4] = "down sleep",
- [5] = "down wakeup",
- [6] = "down sleep ecd",
- [7] = "down wakeup ecd",
- [8] = "down sleep epd",
- [9] = "down wakeup epd",
- [10] = "online",
- [11] = "operational",
- [12] = "scsi slave destroy",
- [13] = "propagate failed adapter",
- [14] = "propagate failed port",
- [15] = "block adapter",
- [16] = "unblock adapter",
- [17] = "block port",
- [18] = "unblock port",
- [19] = "block unit",
- [20] = "unblock unit",
- [21] = "unit recovery failed",
- [22] = "port recovery failed",
- [23] = "adapter recovery failed",
- [24] = "qdio queues down",
- [25] = "p2p failed",
- [26] = "nameserver lookup failed",
- [27] = "nameserver port failed",
- [28] = "link up",
- [29] = "link down",
- [30] = "link up status read",
- [31] = "open port failed",
- [32] = "",
- [33] = "close port",
- [34] = "open unit failed",
- [35] = "exclusive open unit failed",
- [36] = "shared open unit failed",
- [37] = "link down",
- [38] = "link down status read no link",
- [39] = "link down status read fdisc login",
- [40] = "link down status read firmware update",
- [41] = "link down status read unknown reason",
- [42] = "link down ecd incomplete",
- [43] = "link down epd incomplete",
- [44] = "sysfs adapter recovery",
- [45] = "sysfs port recovery",
- [46] = "sysfs unit recovery",
- [47] = "port boxed abort",
- [48] = "unit boxed abort",
- [49] = "port boxed ct",
- [50] = "port boxed close physical",
- [51] = "port boxed open unit",
- [52] = "port boxed close unit",
- [53] = "port boxed fcp",
- [54] = "unit boxed fcp",
- [55] = "port access denied",
- [56] = "",
- [57] = "",
- [58] = "",
- [59] = "unit access denied",
- [60] = "shared unit access denied open unit",
- [61] = "",
- [62] = "request timeout",
- [63] = "adisc link test reject or timeout",
- [64] = "adisc link test d_id changed",
- [65] = "adisc link test failed",
- [66] = "recovery out of memory",
- [67] = "adapter recovery repeated after state change",
- [68] = "port recovery repeated after state change",
- [69] = "unit recovery repeated after state change",
- [70] = "port recovery follow-up after successful adapter recovery",
- [71] = "adapter recovery escalation after failed adapter recovery",
- [72] = "port recovery follow-up after successful physical port "
- "recovery",
- [73] = "adapter recovery escalation after failed physical port "
- "recovery",
- [74] = "unit recovery follow-up after successful port recovery",
- [75] = "physical port recovery escalation after failed port "
- "recovery",
- [76] = "port recovery escalation after failed unit recovery",
- [77] = "",
- [78] = "duplicate request id",
- [79] = "link down",
- [80] = "exclusive read-only unit access unsupported",
- [81] = "shared read-write unit access unsupported",
- [82] = "incoming rscn",
- [83] = "incoming wwpn",
- [84] = "wka port handle not valid close port",
- [85] = "online",
- [86] = "offline",
- [87] = "ccw device gone",
- [88] = "ccw device no path",
- [89] = "ccw device operational",
- [90] = "ccw device shutdown",
- [91] = "sysfs port addition",
- [92] = "sysfs port removal",
- [93] = "sysfs adapter recovery",
- [94] = "sysfs unit addition",
- [95] = "sysfs unit removal",
- [96] = "sysfs port recovery",
- [97] = "sysfs unit recovery",
- [98] = "sequence number mismatch",
- [99] = "link up",
- [100] = "error state",
- [101] = "status read physical port closed",
- [102] = "link up status read",
- [103] = "too many failed status read buffers",
- [104] = "port handle not valid abort",
- [105] = "lun handle not valid abort",
- [106] = "port handle not valid ct",
- [107] = "port handle not valid close port",
- [108] = "port handle not valid close physical port",
- [109] = "port handle not valid open unit",
- [110] = "port handle not valid close unit",
- [111] = "lun handle not valid close unit",
- [112] = "port handle not valid fcp",
- [113] = "lun handle not valid fcp",
- [114] = "handle mismatch fcp",
- [115] = "lun not valid fcp",
- [116] = "qdio send failed",
- [117] = "version mismatch",
- [118] = "incompatible qtcb type",
- [119] = "unknown protocol status",
- [120] = "unknown fsf command",
- [121] = "no recommendation for status qualifier",
- [122] = "status read physical port closed in error",
- [123] = "fc service class not supported",
- [124] = "",
- [125] = "need newer zfcp",
- [126] = "need newer microcode",
- [127] = "arbitrated loop not supported",
- [128] = "",
- [129] = "qtcb size mismatch",
- [130] = "unknown fsf status ecd",
- [131] = "fcp request too big",
- [132] = "",
- [133] = "data direction not valid fcp",
- [134] = "command length not valid fcp",
- [135] = "status read act update",
- [136] = "status read cfdc update",
- [137] = "hbaapi port open",
- [138] = "hbaapi unit open",
- [139] = "hbaapi unit shutdown",
- [140] = "qdio error outbound",
- [141] = "scsi host reset",
- [142] = "dismissing fsf request for recovery action",
- [143] = "recovery action timed out",
- [144] = "recovery action gone",
- [145] = "recovery action being processed",
- [146] = "recovery action ready for next step",
- [147] = "qdio error inbound",
- [148] = "nameserver needed for port scan",
- [149] = "port scan",
- [150] = "ptp attach",
- [151] = "port validation failed",
-};
-
static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
char *buf, const char *_rec)
{
struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec;
char *p = buf;
+ char hint[ZFCP_DBF_ID_SIZE + 1];
+ memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE);
+ hint[ZFCP_DBF_ID_SIZE] = 0;
zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]);
- zfcp_dbf_outs(&p, "hint", zfcp_rec_dbf_ids[r->id2]);
- zfcp_dbf_out(&p, "id", "%d", r->id2);
+ zfcp_dbf_outs(&p, "hint", hint);
switch (r->id) {
case ZFCP_REC_DBF_ID_THREAD:
zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
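For reference, a minimal sketch of the copy-and-terminate pattern the formatter now uses for the fixed-size id tag; the helper and buffer names below are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <linux/string.h>

#define ID_SIZE 7	/* stands in for ZFCP_DBF_ID_SIZE */

/* id2 is a fixed-size tag with no trailing NUL; copy and terminate it
 * before handing it to any %s-style formatting
 */
static void format_hint(char *out, size_t outlen, const char *id2)
{
	char hint[ID_SIZE + 1];

	memcpy(hint, id2, ID_SIZE);
	hint[ID_SIZE] = '\0';
	snprintf(out, outlen, "hint=%s", hint);
}
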
@@ -707,7 +552,7 @@ static struct debug_view zfcp_rec_dbf_view = {
* @adapter: adapter
* This function assumes that the caller is holding erp_lock.
*/
-void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
+void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter)
{
struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
unsigned long flags = 0;
@@ -723,7 +568,7 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
memset(r, 0, sizeof(*r));
r->id = ZFCP_REC_DBF_ID_THREAD;
- r->id2 = id2;
+ memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
r->u.thread.total = total;
r->u.thread.ready = ready;
r->u.thread.running = running;
@@ -737,7 +582,7 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
* @adapter: adapter
* This function assumes that the caller does not hold erp_lock.
*/
-void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
+void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter)
{
unsigned long flags;
@@ -746,7 +591,7 @@ void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
read_unlock_irqrestore(&adapter->erp_lock, flags);
}
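The kernel-doc above spells out the locking contract: zfcp_rec_dbf_event_thread expects erp_lock to be held, while the _lock variant acquires it around the call. A hedged sketch of that pairing, using a plain spinlock and simplified stand-in names:

#include <linux/spinlock.h>

struct dev_stub {			/* simplified stand-in for zfcp_adapter */
	spinlock_t lock;
};

/* caller must hold d->lock (mirrors the erp_lock contract noted above) */
static void trace_thread(struct dev_stub *d)
{
	/* fill and emit the trace record here */
}

/* caller must not hold d->lock; this variant takes it itself */
static void trace_thread_lock(struct dev_stub *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	trace_thread(d);
	spin_unlock_irqrestore(&d->lock, flags);
}
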
-static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
+static void zfcp_rec_dbf_event_target(char *id2, void *ref,
struct zfcp_adapter *adapter,
atomic_t *status, atomic_t *erp_count,
u64 wwpn, u32 d_id, u64 fcp_lun)
@@ -757,7 +602,7 @@ static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
memset(r, 0, sizeof(*r));
r->id = ZFCP_REC_DBF_ID_TARGET;
- r->id2 = id2;
+ memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
r->u.target.ref = (unsigned long)ref;
r->u.target.status = atomic_read(status);
r->u.target.wwpn = wwpn;
@@ -774,7 +619,8 @@ static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
* @ref: additional reference (e.g. request)
* @adapter: adapter
*/
-void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *adapter)
+void zfcp_rec_dbf_event_adapter(char *id, void *ref,
+ struct zfcp_adapter *adapter)
{
zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status,
&adapter->erp_counter, 0, 0, 0);
@@ -786,7 +632,7 @@ void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *adapter)
* @ref: additional reference (e.g. request)
* @port: port
*/
-void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port)
+void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port)
{
struct zfcp_adapter *adapter = port->adapter;
@@ -801,7 +647,7 @@ void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port)
* @ref: additional reference (e.g. request)
* @unit: unit
*/
-void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit)
+void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit)
{
struct zfcp_port *port = unit->port;
struct zfcp_adapter *adapter = port->adapter;
@@ -822,7 +668,7 @@ void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit)
* @port: port
* @unit: unit
*/
-void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need,
+void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need,
void *action, struct zfcp_adapter *adapter,
struct zfcp_port *port, struct zfcp_unit *unit)
{
@@ -832,7 +678,7 @@ void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need,
spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
memset(r, 0, sizeof(*r));
r->id = ZFCP_REC_DBF_ID_TRIGGER;
- r->id2 = id2;
+ memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
r->u.trigger.ref = (unsigned long)ref;
r->u.trigger.want = want;
r->u.trigger.need = need;
@@ -855,7 +701,7 @@ void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need,
* @id2: identifier
* @erp_action: error recovery action struct pointer
*/
-void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
+void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
@@ -864,7 +710,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
memset(r, 0, sizeof(*r));
r->id = ZFCP_REC_DBF_ID_ACTION;
- r->id2 = id2;
+ memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
r->u.action.action = (unsigned long)erp_action;
r->u.action.status = erp_action->status;
r->u.action.step = erp_action->step;
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 74998ff88e57..a573f7344dd6 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -25,6 +25,7 @@
#include "zfcp_fsf.h"
#define ZFCP_DBF_TAG_SIZE 4
+#define ZFCP_DBF_ID_SIZE 7
struct zfcp_dbf_dump {
u8 tag[ZFCP_DBF_TAG_SIZE];
@@ -70,7 +71,7 @@ struct zfcp_rec_dbf_record_action {
struct zfcp_rec_dbf_record {
u8 id;
- u8 id2;
+ char id2[7];
union {
struct zfcp_rec_dbf_record_action action;
struct zfcp_rec_dbf_record_thread thread;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 510662783a6f..a0318630f047 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
#ifndef ZFCP_DEF_H
@@ -243,9 +243,6 @@ struct zfcp_ls_adisc {
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
-#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
-#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
-#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
/* well known address (WKA) port status*/
enum zfcp_wka_status {
@@ -258,7 +255,6 @@ enum zfcp_wka_status {
/* logical unit status */
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
-#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
/* FSF request status (this does not have a common part) */
@@ -447,8 +443,9 @@ struct zfcp_adapter {
spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue req_q; /* request queue */
spinlock_t req_q_lock; /* for operations on queue */
- int req_q_pci_batch; /* SBALs since PCI indication
- was last set */
+ ktime_t req_q_time; /* time of last fill level change */
+ u64 req_q_util; /* for accounting */
+ spinlock_t qdio_stat_lock;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
 						   more available SBALs */
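The new members replace the SBALs-since-PCI counter with a timestamp and a 64-bit accumulator. One plausible way such fields support time-weighted queue utilization accounting is sketched below; the update rule and names are illustrative only, not taken from this patch:

#include <linux/ktime.h>
#include <linux/spinlock.h>

struct q_stats {
	ktime_t last_change;	/* time of last fill level change */
	u64 util;		/* accumulated (buffers in use) * microseconds */
	spinlock_t lock;
};

/* account for the interval since the last change; 'in_use' is the fill
 * level that was valid during that interval (illustrative update rule)
 */
static void q_stats_account(struct q_stats *s, unsigned int in_use)
{
	unsigned long flags;
	ktime_t now = ktime_get();

	spin_lock_irqsave(&s->lock, flags);
	s->util += in_use * ktime_us_delta(now, s->last_change);
	s->last_change = now;
	spin_unlock_irqrestore(&s->lock, flags);
}
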
@@ -514,6 +511,9 @@ struct zfcp_port {
u32 maxframe_size;
u32 supported_classes;
struct work_struct gid_pn_work;
+ struct work_struct test_link_work;
+ struct work_struct rport_work;
+ enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
};
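struct zfcp_port now carries dedicated work items plus a small rport_task state, so rport add/del requests can be deferred to one work function. A hedged sketch of that pattern with stub names (the work body here is illustrative, not the driver's zfcp_scsi_rport_work):

#include <linux/workqueue.h>

enum rport_task_stub { RP_NONE, RP_ADD, RP_DEL };

struct port_stub {
	struct work_struct rport_work;
	enum rport_task_stub rport_task;	/* last requested transition */
};

/* a single work function acts on whatever was requested last */
static void rport_work_fn(struct work_struct *work)
{
	struct port_stub *p = container_of(work, struct port_stub, rport_work);

	switch (p->rport_task) {
	case RP_ADD:
		/* register the remote port with the transport layer */
		break;
	case RP_DEL:
		/* unregister it */
		break;
	case RP_NONE:
		break;
	}
	p->rport_task = RP_NONE;
}
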
struct zfcp_unit {
@@ -587,9 +587,6 @@ struct zfcp_fsf_req_qtcb {
/********************** ZFCP SPECIFIC DEFINES ********************************/
-#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
-#define ZFCP_REQ_NO_QTCB 0x00000008
-
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 387a3af528ac..631bdb1dfd6c 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
#define KMSG_COMPONENT "zfcp"
@@ -55,7 +55,7 @@ enum zfcp_erp_act_result {
static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
{
- zfcp_erp_modify_adapter_status(adapter, 15, NULL,
+ zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL,
ZFCP_STATUS_COMMON_UNBLOCKED | mask,
ZFCP_CLEAR);
}
@@ -75,9 +75,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter;
list_move(&act->list, &act->adapter->erp_ready_head);
- zfcp_rec_dbf_event_action(146, act);
+ zfcp_rec_dbf_event_action("erardy1", act);
up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread(2, adapter);
+ zfcp_rec_dbf_event_thread("erardy2", adapter);
}
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -208,7 +208,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
- struct zfcp_unit *unit, u8 id, void *ref)
+ struct zfcp_unit *unit, char *id, void *ref)
{
int retval = 1, need;
struct zfcp_erp_action *act = NULL;
@@ -228,7 +228,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread(1, adapter);
+ zfcp_rec_dbf_event_thread("eracte1", adapter);
retval = 0;
out:
zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
@@ -237,13 +237,14 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
}
static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
- int clear_mask, u8 id, void *ref)
+ int clear_mask, char *id, void *ref)
{
zfcp_erp_adapter_block(adapter, clear_mask);
+ zfcp_scsi_schedule_rports_block(adapter);
/* ensure propagation of failed status to new devices */
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- zfcp_erp_adapter_failed(adapter, 13, NULL);
+ zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
@@ -258,7 +259,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
* @ref: Reference for debug trace event.
*/
void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
- u8 id, void *ref)
+ char *id, void *ref)
{
unsigned long flags;
@@ -277,7 +278,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
* @ref: Reference for debug trace event.
*/
void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
- u8 id, void *ref)
+ char *id, void *ref)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
@@ -290,7 +291,8 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
* @id: Id for debug trace event.
* @ref: Reference for debug trace event.
*/
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
+ void *ref)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
zfcp_erp_port_reopen(port, clear | flags, id, ref);
@@ -303,7 +305,8 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref)
* @id: Id for debug trace event.
* @ref: Reference for debug trace event.
*/
-void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref)
+void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id,
+ void *ref)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
@@ -311,15 +314,16 @@ void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref)
static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
{
- zfcp_erp_modify_port_status(port, 17, NULL,
+ zfcp_erp_modify_port_status(port, "erpblk1", NULL,
ZFCP_STATUS_COMMON_UNBLOCKED | clear,
ZFCP_CLEAR);
}
static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
- int clear, u8 id, void *ref)
+ int clear, char *id, void *ref)
{
zfcp_erp_port_block(port, clear);
+ zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return;
@@ -334,7 +338,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
* @id: Id for debug trace event.
* @ref: Reference for debug trace event.
*/
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id,
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
void *ref)
{
unsigned long flags;
@@ -347,14 +351,15 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id,
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id,
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
void *ref)
{
zfcp_erp_port_block(port, clear);
+ zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
/* ensure propagation of failed status to new devices */
- zfcp_erp_port_failed(port, 14, NULL);
+ zfcp_erp_port_failed(port, "erpreo1", NULL);
return -EIO;
}
@@ -369,7 +374,7 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id,
*
* Returns 0 if recovery has been triggered, < 0 if not.
*/
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
{
unsigned long flags;
int retval;
@@ -386,12 +391,12 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref)
static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
{
- zfcp_erp_modify_unit_status(unit, 19, NULL,
+ zfcp_erp_modify_unit_status(unit, "erublk1", NULL,
ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
ZFCP_CLEAR);
}
-static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id,
+static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
void *ref)
{
struct zfcp_adapter *adapter = unit->port->adapter;
@@ -411,7 +416,8 @@ static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id,
* @clear_mask: specifies flags in unit status to be cleared
* Return: 0 on success, < 0 on error
*/
-void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, void *ref)
+void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
+ void *ref)
{
unsigned long flags;
struct zfcp_port *port = unit->port;
@@ -437,28 +443,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status)
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
- zfcp_rec_dbf_event_adapter(16, NULL, adapter);
+ zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
- zfcp_rec_dbf_event_port(18, NULL, port);
+ zfcp_rec_dbf_event_port("erpubl1", NULL, port);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
- zfcp_rec_dbf_event_unit(20, NULL, unit);
+ zfcp_rec_dbf_event_unit("eruubl1", NULL, unit);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
}
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
- zfcp_rec_dbf_event_action(145, erp_action);
+ zfcp_rec_dbf_event_action("erator1", erp_action);
}
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -474,11 +480,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
- zfcp_rec_dbf_event_action(142, act);
+ zfcp_rec_dbf_event_action("erscf_1", act);
act->fsf_req->erp_action = NULL;
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
- zfcp_rec_dbf_event_action(143, act);
+ zfcp_rec_dbf_event_action("erscf_2", act);
if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
ZFCP_STATUS_FSFREQ_DISMISSED))
act->fsf_req = NULL;
@@ -530,7 +536,7 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
}
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
- int clear, u8 id, void *ref)
+ int clear, char *id, void *ref)
{
struct zfcp_port *port;
@@ -538,8 +544,8 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
_zfcp_erp_port_reopen(port, clear, id, ref);
}
-static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
- void *ref)
+static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
+ char *id, void *ref)
{
struct zfcp_unit *unit;
@@ -559,28 +565,28 @@ static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act)
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (status == ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_port_reopen_all(adapter, 0, 70, NULL);
+ _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL);
else
- _zfcp_erp_adapter_reopen(adapter, 0, 71, NULL);
+ _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
if (status == ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_port_reopen(port, 0, 72, NULL);
+ _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL);
else
- _zfcp_erp_adapter_reopen(adapter, 0, 73, NULL);
+ _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (status == ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_unit_reopen_all(port, 0, 74, NULL);
+ _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL);
else
- _zfcp_erp_port_forced_reopen(port, 0, 75, NULL);
+ _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL);
break;
case ZFCP_ERP_ACTION_REOPEN_UNIT:
if (status != ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_port_reopen(unit->port, 0, 76, NULL);
+ _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL);
break;
}
}
@@ -617,7 +623,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
- _zfcp_erp_port_reopen(port, 0, 150, NULL);
+ _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL);
}
static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -640,9 +646,9 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
}
- zfcp_rec_dbf_event_thread_lock(6, adapter);
+ zfcp_rec_dbf_event_thread_lock("erasfx1", adapter);
down(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock(7, adapter);
+ zfcp_rec_dbf_event_thread_lock("erasfx2", adapter);
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
break;
@@ -681,9 +687,9 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
if (ret)
return ZFCP_ERP_FAILED;
- zfcp_rec_dbf_event_thread_lock(8, adapter);
+ zfcp_rec_dbf_event_thread_lock("erasox1", adapter);
down(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock(9, adapter);
+ zfcp_rec_dbf_event_thread_lock("erasox2", adapter);
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
return ZFCP_ERP_FAILED;
@@ -705,60 +711,59 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
- int close)
+static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
{
- int retval = ZFCP_ERP_SUCCEEDED;
struct zfcp_adapter *adapter = act->adapter;
- if (close)
- goto close_only;
-
- retval = zfcp_erp_adapter_strategy_open_qdio(act);
- if (retval != ZFCP_ERP_SUCCEEDED)
- goto failed_qdio;
-
- retval = zfcp_erp_adapter_strategy_open_fsf(act);
- if (retval != ZFCP_ERP_SUCCEEDED)
- goto failed_openfcp;
-
- atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
-
- return ZFCP_ERP_SUCCEEDED;
-
- close_only:
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
- &act->adapter->status);
-
- failed_openfcp:
/* close queues to ensure that buffers are not accessed by adapter */
zfcp_qdio_close(adapter);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
/* all ports and units are closed */
- zfcp_erp_modify_adapter_status(adapter, 24, NULL,
+ zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
- failed_qdio:
+
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
- ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &act->adapter->status);
- return retval;
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
-static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
{
- int retval;
+ struct zfcp_adapter *adapter = act->adapter;
- zfcp_erp_adapter_strategy_generic(act, 1); /* close */
- if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
- return ZFCP_ERP_EXIT;
+ if (zfcp_erp_adapter_strategy_open_qdio(act)) {
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status);
+ return ZFCP_ERP_FAILED;
+ }
+
+ if (zfcp_erp_adapter_strategy_open_fsf(act)) {
+ zfcp_erp_adapter_strategy_close(act);
+ return ZFCP_ERP_FAILED;
+ }
+
+ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+
+ return ZFCP_ERP_SUCCEEDED;
+}
- retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
+static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+{
+ struct zfcp_adapter *adapter = act->adapter;
- if (retval == ZFCP_ERP_FAILED)
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN) {
+ zfcp_erp_adapter_strategy_close(act);
+ if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+ return ZFCP_ERP_EXIT;
+ }
+
+ if (zfcp_erp_adapter_strategy_open(act)) {
ssleep(8);
+ return ZFCP_ERP_FAILED;
+ }
- return retval;
+ return ZFCP_ERP_SUCCEEDED;
}
static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
@@ -777,10 +782,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
{
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_PORT_PHYS_CLOSING |
- ZFCP_STATUS_PORT_INVALID_WWPN,
- &port->status);
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
}
static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
@@ -836,7 +838,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
struct zfcp_port *port = act->port;
if (port->wwpn != adapter->peer_wwpn) {
- zfcp_erp_port_failed(port, 25, NULL);
+ zfcp_erp_port_failed(port, "eroptp1", NULL);
return ZFCP_ERP_FAILED;
}
port->d_id = adapter->peer_d_id;
@@ -855,7 +857,7 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
if (retval)
zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
-
+ zfcp_port_put(port);
}
static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
@@ -871,17 +873,15 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
return zfcp_erp_open_ptp_port(act);
if (!port->d_id) {
- queue_work(zfcp_data.work_queue, &port->gid_pn_work);
+ zfcp_port_get(port);
+ if (!queue_work(zfcp_data.work_queue,
+ &port->gid_pn_work))
+ zfcp_port_put(port);
return ZFCP_ERP_CONTINUES;
}
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
- if (!port->d_id) {
- if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
- zfcp_erp_port_failed(port, 26, NULL);
- return ZFCP_ERP_EXIT;
- }
+ if (!port->d_id)
return ZFCP_ERP_FAILED;
- }
return zfcp_erp_port_strategy_open_port(act);
case ZFCP_ERP_STEP_PORT_OPENING:
@@ -995,7 +995,7 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
"port 0x%016Lx\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, 21, NULL);
+ zfcp_erp_unit_failed(unit, "erusck1", NULL);
}
break;
}
@@ -1025,7 +1025,7 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
dev_err(&port->adapter->ccw_device->dev,
"ERP failed for remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
- zfcp_erp_port_failed(port, 22, NULL);
+ zfcp_erp_port_failed(port, "erpsck1", NULL);
}
break;
}
@@ -1052,7 +1052,7 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
dev_err(&adapter->ccw_device->dev,
"ERP cannot recover an error "
"on the FCP device\n");
- zfcp_erp_adapter_failed(adapter, 23, NULL);
+ zfcp_erp_adapter_failed(adapter, "erasck1", NULL);
}
break;
}
@@ -1117,7 +1117,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
_zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED,
- 67, NULL);
+ "ersscg1", NULL);
return ZFCP_ERP_EXIT;
}
break;
@@ -1127,7 +1127,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
_zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
- 68, NULL);
+ "ersscg2", NULL);
return ZFCP_ERP_EXIT;
}
break;
@@ -1136,7 +1136,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
_zfcp_erp_unit_reopen(unit,
ZFCP_STATUS_COMMON_ERP_FAILED,
- 69, NULL);
+ "ersscg3", NULL);
return ZFCP_ERP_EXIT;
}
break;
@@ -1155,7 +1155,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
list_del(&erp_action->list);
- zfcp_rec_dbf_event_action(144, erp_action);
+ zfcp_rec_dbf_event_action("eractd1", erp_action);
switch (erp_action->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
@@ -1214,38 +1214,8 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
- queue_work(zfcp_data.work_queue, &p->work);
-}
-
-static void zfcp_erp_rport_register(struct zfcp_port *port)
-{
- struct fc_rport_identifiers ids;
- ids.node_name = port->wwnn;
- ids.port_name = port->wwpn;
- ids.port_id = port->d_id;
- ids.roles = FC_RPORT_ROLE_FCP_TARGET;
- port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
- if (!port->rport) {
- dev_err(&port->adapter->ccw_device->dev,
- "Registering port 0x%016Lx failed\n",
- (unsigned long long)port->wwpn);
- return;
- }
-
- scsi_target_unblock(&port->rport->dev);
- port->rport->maxframe_size = port->maxframe_size;
- port->rport->supported_classes = port->supported_classes;
-}
-
-static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
-{
- struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list) {
- if (!port->rport)
- continue;
- fc_remote_port_delete(port->rport);
- port->rport = NULL;
- }
+ if (!queue_work(zfcp_data.work_queue, &p->work))
+ zfcp_unit_put(unit);
}
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
@@ -1256,10 +1226,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if ((result == ZFCP_ERP_SUCCEEDED) &&
- !unit->device && port->rport) {
- atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
- &unit->status);
+ flush_work(&port->rport_work);
+ if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
if (!(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_SCSI_WORK_PENDING))
zfcp_erp_schedule_work(unit);
@@ -1269,27 +1237,17 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
- if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) {
- zfcp_port_put(port);
- return;
- }
- if ((result == ZFCP_ERP_SUCCEEDED) && !port->rport)
- zfcp_erp_rport_register(port);
- if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) {
- fc_remote_port_delete(port->rport);
- port->rport = NULL;
- }
+ if (result == ZFCP_ERP_SUCCEEDED)
+ zfcp_scsi_schedule_rport_register(port);
zfcp_port_put(port);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
- if (result != ZFCP_ERP_SUCCEEDED) {
- unregister_service_level(&adapter->service_level);
- zfcp_erp_rports_del(adapter);
- } else {
+ if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
schedule_work(&adapter->scan_work);
- }
+ } else
+ unregister_service_level(&adapter->service_level);
zfcp_adapter_put(adapter);
break;
}
@@ -1346,7 +1304,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
}
if (adapter->erp_total_count == adapter->erp_low_mem_count)
- _zfcp_erp_adapter_reopen(adapter, 0, 66, NULL);
+ _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
else {
zfcp_erp_strategy_memwait(erp_action);
retval = ZFCP_ERP_CONTINUES;
@@ -1406,9 +1364,9 @@ static int zfcp_erp_thread(void *data)
zfcp_erp_wakeup(adapter);
}
- zfcp_rec_dbf_event_thread_lock(4, adapter);
+ zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
ignore = down_interruptible(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock(5, adapter);
+ zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
}
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1453,7 +1411,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock(3, adapter);
+ zfcp_rec_dbf_event_thread_lock("erthrk1", adapter);
wait_event(adapter->erp_thread_wqh,
!(atomic_read(&adapter->status) &
@@ -1469,7 +1427,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
* @id: Event id for debug trace.
* @ref: Reference for debug trace.
*/
-void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
+void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
{
zfcp_erp_modify_adapter_status(adapter, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
@@ -1481,7 +1439,7 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
* @id: Event id for debug trace.
* @ref: Reference for debug trace.
*/
-void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
+void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
{
zfcp_erp_modify_port_status(port, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
@@ -1493,7 +1451,7 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
* @id: Event id for debug trace.
* @ref: Reference for debug trace.
*/
-void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
+void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
{
zfcp_erp_modify_unit_status(unit, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
@@ -1520,7 +1478,7 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
*
* Changes in common status bits are propagated to attached ports and units.
*/
-void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
+void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
void *ref, u32 mask, int set_or_clear)
{
struct zfcp_port *port;
@@ -1554,7 +1512,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
*
* Changes in common status bits are propagated to attached units.
*/
-void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
+void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
u32 mask, int set_or_clear)
{
struct zfcp_unit *unit;
@@ -1586,7 +1544,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
* @mask: status bits to change
* @set_or_clear: ZFCP_SET or ZFCP_CLEAR
*/
-void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
+void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
u32 mask, int set_or_clear)
{
if (set_or_clear == ZFCP_SET) {
@@ -1609,7 +1567,7 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
 * @id: The debug trace id.
 * @ref: Reference for the debug trace.
*/
-void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
+void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
{
unsigned long flags;
@@ -1626,7 +1584,7 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
 * @id: The debug trace id.
 * @ref: Reference for the debug trace.
*/
-void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
+void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
{
zfcp_erp_modify_unit_status(unit, id, ref,
ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
@@ -1642,7 +1600,7 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
* Since the adapter has denied access, stop using the port and the
* attached units.
*/
-void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
+void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
{
unsigned long flags;
@@ -1661,14 +1619,14 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
*
* Since the adapter has denied access, stop using the unit.
*/
-void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
+void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref)
{
zfcp_erp_modify_unit_status(unit, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
}
-static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id,
+static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id,
void *ref)
{
int status = atomic_read(&unit->status);
@@ -1679,7 +1637,7 @@ static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id,
zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
}
-static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
+static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
void *ref)
{
struct zfcp_unit *unit;
@@ -1701,7 +1659,7 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
* @id: Id for debug trace
* @ref: Reference for debug trace
*/
-void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
+void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
void *ref)
{
struct zfcp_port *port;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b5adeda93e1d..f6399ca97bcb 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
#ifndef ZFCP_EXT_H
@@ -35,15 +35,15 @@ extern struct miscdevice zfcp_cfdc_misc;
/* zfcp_dbf.c */
extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *);
-extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *,
+extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *);
+extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *);
+extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *);
+extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *,
struct zfcp_adapter *,
struct zfcp_port *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
+extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
@@ -66,31 +66,34 @@ extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
struct scsi_cmnd *);
/* zfcp_erp.c */
-extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
- u32, int);
-extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
-extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
-extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
-extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
+extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
+ void *, u32, int);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
+ void *);
+extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *);
+extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32,
int);
-extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
-extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
-extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
-extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
-extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
+extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
+ void *);
+extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *);
+extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32,
int);
-extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
-extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
-extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
+extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *);
+extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *);
+extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
-extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *);
-extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *);
-extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
-extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
-extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
+extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
+extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
+extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
+extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
+extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
+ void *);
extern void zfcp_erp_timeout_handler(unsigned long);
extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
@@ -101,6 +104,7 @@ extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
+extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
/* zfcp_fsf.c */
@@ -125,16 +129,13 @@ extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
struct zfcp_erp_action *);
extern int zfcp_fsf_send_els(struct zfcp_send_els *);
-extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
- struct zfcp_unit *,
- struct scsi_cmnd *, int, int);
+extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
+ struct scsi_cmnd *);
extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
-extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *,
- struct zfcp_unit *, u8, int);
+extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
- struct zfcp_adapter *,
- struct zfcp_unit *, int);
+ struct zfcp_unit *);
/* zfcp_qdio.c */
extern int zfcp_qdio_allocate(struct zfcp_adapter *);
@@ -153,6 +154,10 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
extern struct fc_function_template zfcp_transport_functions;
+extern void zfcp_scsi_rport_work(struct work_struct *);
+extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
+extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
+extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
/* zfcp_sysfs.c */
extern struct attribute_group zfcp_sysfs_unit_attrs;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index eabdfe24456e..e8d032b9dfbd 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corporation 2008
+ * Copyright IBM Corporation 2008, 2009
*/
#define KMSG_COMPONENT "zfcp"
@@ -94,12 +94,16 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
static void zfcp_wka_port_offline(struct work_struct *work)
{
- struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ struct delayed_work *dw = to_delayed_work(work);
struct zfcp_wka_port *wka_port =
container_of(dw, struct zfcp_wka_port, work);
- wait_event(wka_port->completion_wq,
- atomic_read(&wka_port->refcount) == 0);
+	/* Don't wait forever. If the wka_port is too busy, take it offline
+	   through a new call later. */
+ if (!wait_event_timeout(wka_port->completion_wq,
+ atomic_read(&wka_port->refcount) == 0,
+ HZ >> 1))
+ return;
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
@@ -145,16 +149,10 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct zfcp_port *port;
read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
- if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_PHYS_OPEN))
- /* Try to connect to unused ports anyway. */
- zfcp_erp_port_reopen(port,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- 82, fsf_req);
- else if ((port->d_id & range) == (elem->nport_did & range))
- /* Check connection status for connected ports */
+ list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
+ if ((port->d_id & range) == (elem->nport_did & range))
zfcp_test_link(port);
- }
+
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
@@ -196,7 +194,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
if (port && (port->wwpn == wwpn))
- zfcp_erp_port_forced_reopen(port, 0, 83, req);
+ zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
}
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
@@ -259,10 +257,9 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
if (ct->status)
return;
- if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
- atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
+ if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT)
return;
- }
+
/* paranoia */
if (ct_iu_req->wwpn != port->wwpn)
return;
@@ -375,16 +372,22 @@ static void zfcp_fc_adisc_handler(unsigned long data)
if (adisc->els.status) {
/* request rejected or timed out */
- zfcp_erp_port_forced_reopen(port, 0, 63, NULL);
+ zfcp_erp_port_forced_reopen(port, 0, "fcadh_1", NULL);
goto out;
}
if (!port->wwnn)
port->wwnn = ls_adisc->wwnn;
- if (port->wwpn != ls_adisc->wwpn)
- zfcp_erp_port_reopen(port, 0, 64, NULL);
+ if ((port->wwpn != ls_adisc->wwpn) ||
+ !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
+ zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcadh_2", NULL);
+ goto out;
+ }
+ /* port is good, unblock rport without going through erp */
+ zfcp_scsi_schedule_rport_register(port);
out:
zfcp_port_put(port);
kfree(adisc);
@@ -422,6 +425,31 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
return zfcp_fsf_send_els(&adisc->els);
}
+void zfcp_fc_link_test_work(struct work_struct *work)
+{
+ struct zfcp_port *port =
+ container_of(work, struct zfcp_port, test_link_work);
+ int retval;
+
+ if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_UNBLOCKED)) {
+ zfcp_port_put(port);
+ return; /* port erp is running and will update rport status */
+ }
+
+ zfcp_port_get(port);
+ port->rport_task = RPORT_DEL;
+ zfcp_scsi_rport_work(&port->rport_work);
+
+ retval = zfcp_fc_adisc(port);
+ if (retval == 0)
+ return;
+
+ /* send of ADISC was not possible */
+ zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
+
+ zfcp_port_put(port);
+}
+
/**
* zfcp_test_link - lightweight link test procedure
* @port: port to be tested
@@ -432,17 +460,9 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
*/
void zfcp_test_link(struct zfcp_port *port)
{
- int retval;
-
zfcp_port_get(port);
- retval = zfcp_fc_adisc(port);
- if (retval == 0)
- return;
-
- /* send of ADISC was not possible */
- zfcp_port_put(port);
- if (retval != -EBUSY)
- zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
+ if (!queue_work(zfcp_data.work_queue, &port->test_link_work))
+ zfcp_port_put(port);
}
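zfcp_test_link now takes a port reference before queueing and drops it again if queue_work() reports the work was already pending. The same get/queue/put idiom, reduced to a stub object for illustration:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct obj {
	struct kref kref;		/* any get/put pair works the same way */
	struct work_struct work;
};

static void obj_release(struct kref *kref)
{
	/* free the object here */
}

/* take a reference for the queued work; drop it again if the work
 * was already pending and nothing new was queued
 */
static void obj_schedule(struct obj *o, struct workqueue_struct *wq)
{
	kref_get(&o->kref);
	if (!queue_work(wq, &o->work))
		kref_put(&o->kref, obj_release);
}
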
static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
@@ -529,7 +549,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
zfcp_port_put(port);
return;
}
- zfcp_erp_port_shutdown(port, 0, 151, NULL);
+ zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL);
zfcp_erp_wait(adapter);
zfcp_port_put(port);
zfcp_port_dequeue(port);
@@ -592,7 +612,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
if (IS_ERR(port))
ret = PTR_ERR(port);
else
- zfcp_erp_port_reopen(port, 0, 149, NULL);
+ zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
}
zfcp_erp_wait(adapter);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e6416f8541b0..b29f3121b666 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
#define KMSG_COMPONENT "zfcp"
@@ -12,11 +12,14 @@
#include <linux/blktrace_api.h>
#include "zfcp_ext.h"
+#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
+#define ZFCP_REQ_NO_QTCB 0x00000008
+
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
- zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
- NULL);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fsrth_1", NULL);
}
static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -75,7 +78,7 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
(unsigned long long)port->wwpn);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
- zfcp_erp_port_access_denied(port, 55, req);
+ zfcp_erp_port_access_denied(port, "fspad_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -89,7 +92,7 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
(unsigned long long)unit->port->wwpn);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
- zfcp_erp_unit_access_denied(unit, 59, req);
+ zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -97,7 +100,7 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
dev_err(&req->adapter->ccw_device->dev, "FCP device not "
"operational because of an unsupported FC class\n");
- zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -159,20 +162,13 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
list_for_each_entry(port, &adapter->port_list_head, list)
if (port->d_id == d_id) {
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
- switch (sr_buf->status_subtype) {
- case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
- zfcp_erp_port_reopen(port, 0, 101, req);
- break;
- case FSF_STATUS_READ_SUB_ERROR_PORT:
- zfcp_erp_port_shutdown(port, 0, 122, req);
- break;
- }
+ zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
return;
}
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
-static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
+static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
struct fsf_link_down_info *link_down)
{
struct zfcp_adapter *adapter = req->adapter;
@@ -181,6 +177,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
return;
atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+ zfcp_scsi_schedule_rports_block(adapter);
if (!link_down)
goto out;
@@ -261,13 +258,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
switch (sr_buf->status_subtype) {
case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
- zfcp_fsf_link_down_info_eval(req, 38, ldi);
+ zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
break;
case FSF_STATUS_READ_SUB_FDISC_FAILED:
- zfcp_fsf_link_down_info_eval(req, 39, ldi);
+ zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
break;
case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
- zfcp_fsf_link_down_info_eval(req, 40, NULL);
+ zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
};
}
@@ -307,22 +304,23 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
dev_info(&adapter->ccw_device->dev,
"The local link has been restored\n");
/* All ports should be marked as ready to run again */
- zfcp_erp_modify_adapter_status(adapter, 30, NULL,
+ zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
- 102, req);
+ "fssrh_2", req);
break;
case FSF_STATUS_READ_NOTIFICATION_LOST:
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
- zfcp_erp_adapter_access_changed(adapter, 135, req);
+ zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
+ req);
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
schedule_work(&adapter->scan_work);
break;
case FSF_STATUS_READ_CFDC_UPDATED:
- zfcp_erp_adapter_access_changed(adapter, 136, req);
+ zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
break;
case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
adapter->adapter_features = sr_buf->payload.word[0];
@@ -351,7 +349,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter reported a problem "
"that cannot be recovered\n");
- zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
break;
}
/* all non-return stats set FSFREQ_ERROR*/
@@ -368,7 +366,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter does not recognize the command 0x%x\n",
req->qtcb->header.fsf_command);
- zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -400,17 +398,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
"QTCB version 0x%x not supported by FCP adapter "
"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
psq->word[0], psq->word[1]);
- zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
break;
case FSF_PROT_ERROR_STATE:
case FSF_PROT_SEQ_NUMB_ERROR:
- zfcp_erp_adapter_reopen(adapter, 0, 98, req);
+ zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
req->status |= ZFCP_STATUS_FSFREQ_RETRY;
break;
case FSF_PROT_UNSUPP_QTCB_TYPE:
dev_err(&adapter->ccw_device->dev,
"The QTCB type is not supported by the FCP adapter\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -420,27 +418,29 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"0x%Lx is an ambiguous request identifier\n",
(unsigned long long)qtcb->bottom.support.req_handle);
- zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
break;
case FSF_PROT_LINK_DOWN:
- zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info);
+ zfcp_fsf_link_down_info_eval(req, "fspse_5",
+ &psq->link_down_info);
/* FIXME: reopening adapter now? better wait for link up */
- zfcp_erp_adapter_reopen(adapter, 0, 79, req);
+ zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
break;
case FSF_PROT_REEST_QUEUE:
/* All ports should be marked as ready to run again */
- zfcp_erp_modify_adapter_status(adapter, 28, NULL,
+ zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_COMMON_ERP_FAILED, 99, req);
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fspse_8", req);
break;
default:
dev_err(&adapter->ccw_device->dev,
"0x%x is not a valid transfer protocol status\n",
qtcb->prefix.prot_status);
- zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
}
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -526,7 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
return -EIO;
}
@@ -560,7 +560,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
"FCP adapter maximum QTCB size (%d bytes) "
"is too small\n",
bottom->max_qtcb_size);
- zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
return;
}
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -577,11 +577,11 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status);
- zfcp_fsf_link_down_info_eval(req, 42,
+ zfcp_fsf_link_down_info_eval(req, "fsecdh2",
&qtcb->header.fsf_status_qual.link_down_info);
break;
default:
- zfcp_erp_adapter_shutdown(adapter, 0, 130, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
return;
}
@@ -597,14 +597,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"The FCP adapter only supports newer "
"control block versions\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
return;
}
if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
"The FCP adapter only supports older "
"control block versions\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
}
}
@@ -617,9 +617,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
fc_host_permanent_port_name(shost) = bottom->wwpn;
- else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ } else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) = bottom->supported_speed;
@@ -638,20 +639,12 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
zfcp_fsf_exchange_port_evaluate(req);
- zfcp_fsf_link_down_info_eval(req, 43,
+ zfcp_fsf_link_down_info_eval(req, "fsepdh1",
&qtcb->header.fsf_status_qual.link_down_info);
break;
}
}
-static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
-{
- if (atomic_read(&adapter->req_q.count) > 0)
- return 1;
- atomic_inc(&adapter->qdio_outb_full);
- return 0;
-}
-
static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
__releases(&adapter->req_q_lock)
__acquires(&adapter->req_q_lock)
@@ -735,7 +728,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
- req->req_id = adapter->req_no++;
+ req->req_id = adapter->req_no;
req->sbal_number = 1;
req->sbal_first = req_q->first;
req->sbal_last = req_q->first;
@@ -791,13 +784,14 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
if (zfcp_reqlist_find_safe(adapter, req))
zfcp_reqlist_remove(adapter, req);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
- zfcp_erp_adapter_reopen(adapter, 0, 116, req);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
return -EIO;
}
/* Don't increase for unsolicited status */
if (req->qtcb)
adapter->fsf_req_seq_no++;
+ adapter->req_no++;
return 0;
}
@@ -870,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
- req);
+ zfcp_erp_adapter_reopen(unit->port->adapter, 0,
+ "fsafch1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
break;
case FSF_LUN_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
- zfcp_erp_port_reopen(unit->port, 0, 105, req);
+ zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
break;
@@ -885,12 +879,12 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, 47, req);
+ zfcp_erp_port_boxed(unit->port, "fsafch3", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
case FSF_LUN_BOXED:
- zfcp_erp_unit_boxed(unit, 48, req);
+ zfcp_erp_unit_boxed(unit, "fsafch4", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
@@ -912,27 +906,22 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_abort_fcp_command - abort running SCSI command
* @old_req_id: unsigned long
- * @adapter: pointer to struct zfcp_adapter
* @unit: pointer to struct zfcp_unit
- * @req_flags: integer specifying the request flags
* Returns: pointer to struct zfcp_fsf_req
- *
- * FIXME(design): should be watched by a timeout !!!
*/
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
- struct zfcp_adapter *adapter,
- struct zfcp_unit *unit,
- int req_flags)
+ struct zfcp_unit *unit)
{
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
+ struct zfcp_adapter *adapter = unit->port->adapter;
- spin_lock(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ spin_lock_bh(&adapter->req_q_lock);
+ if (zfcp_fsf_req_sbal_get(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
- req_flags, adapter->pool.fsf_req_abort);
+ 0, adapter->pool.fsf_req_abort);
if (IS_ERR(req)) {
req = NULL;
goto out;
@@ -960,7 +949,7 @@ out_error_free:
zfcp_fsf_req_free(req);
req = NULL;
out:
- spin_unlock(&adapter->req_q_lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return req;
}
@@ -998,7 +987,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
ZFCP_STATUS_FSFREQ_RETRY;
break;
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(adapter, 0, 106, req);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1174,12 +1163,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
struct fsf_qtcb_bottom_support *bottom;
int ret = -EIO;
- if (unlikely(!(atomic_read(&els->port->status) &
- ZFCP_STATUS_COMMON_UNBLOCKED)))
- return -EBUSY;
-
- spin_lock(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ spin_lock_bh(&adapter->req_q_lock);
+ if (zfcp_fsf_req_sbal_get(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
ZFCP_REQ_AUTO_CLEANUP, NULL);
@@ -1212,7 +1197,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
failed_send:
zfcp_fsf_req_free(req);
out:
- spin_unlock(&adapter->req_q_lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return ret;
}
@@ -1224,7 +1209,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
int retval = -EIO;
spin_lock_bh(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ if (zfcp_fsf_req_sbal_get(adapter))
goto out;
req = zfcp_fsf_req_create(adapter,
FSF_QTCB_EXCHANGE_CONFIG_DATA,
@@ -1320,7 +1305,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
return -EOPNOTSUPP;
spin_lock_bh(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ if (zfcp_fsf_req_sbal_get(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
ZFCP_REQ_AUTO_CLEANUP,
@@ -1366,7 +1351,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
return -EOPNOTSUPP;
spin_lock_bh(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ if (zfcp_fsf_req_sbal_get(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
@@ -1416,7 +1401,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
"Not enough FCP adapter resources to open "
"remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
- zfcp_erp_port_failed(port, 31, req);
+ zfcp_erp_port_failed(port, "fsoph_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1522,13 +1507,13 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(port->adapter, 0, 107, req);
+ zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
break;
case FSF_GOOD:
- zfcp_erp_modify_port_status(port, 33, req,
+ zfcp_erp_modify_port_status(port, "fscph_2", req,
ZFCP_STATUS_COMMON_OPEN,
ZFCP_CLEAR);
break;
@@ -1657,7 +1642,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req);
+ zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
}
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
@@ -1712,18 +1697,18 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
struct zfcp_unit *unit;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- goto skip_fsfstatus;
+ return;
switch (header->fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
+ zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ACCESS_DENIED:
zfcp_fsf_access_denied_port(req, port);
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(port, 50, req);
+ zfcp_erp_port_boxed(port, "fscpph2", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
/* can't use generic zfcp_erp_modify_port_status because
@@ -1752,8 +1737,6 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
&unit->status);
break;
}
-skip_fsfstatus:
- atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
}
/**
@@ -1789,8 +1772,6 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
req->erp_action = erp_action;
req->handler = zfcp_fsf_close_physical_port_handler;
erp_action->fsf_req = req;
- atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
- &erp_action->port->status);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
@@ -1825,7 +1806,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req);
+ zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
/* fall through */
case FSF_LUN_ALREADY_OPEN:
break;
@@ -1835,7 +1816,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, 51, req);
+ zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
@@ -1851,7 +1832,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
else
zfcp_act_eval_err(adapter,
header->fsf_status_qual.word[2]);
- zfcp_erp_unit_access_denied(unit, 60, req);
+ zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1862,7 +1843,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
"0x%016Lx on port 0x%016Lx\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, 34, req);
+ zfcp_erp_unit_failed(unit, "fsouh_4", req);
/* fall through */
case FSF_INVALID_COMMAND_OPTION:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1911,9 +1892,9 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
"port 0x%016Lx)\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, 35, req);
+ zfcp_erp_unit_failed(unit, "fsouh_5", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- zfcp_erp_unit_shutdown(unit, 0, 80, req);
+ zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
} else if (!exclusive && readwrite) {
dev_err(&adapter->ccw_device->dev,
"Shared read-write access not "
@@ -1921,9 +1902,9 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
"0x%016Lx)\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, 36, req);
+ zfcp_erp_unit_failed(unit, "fsouh_7", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- zfcp_erp_unit_shutdown(unit, 0, 81, req);
+ zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
}
}
break;
@@ -1988,15 +1969,15 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req);
+ zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_HANDLE_NOT_VALID:
- zfcp_erp_port_reopen(unit->port, 0, 111, req);
+ zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, 52, req);
+ zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
@@ -2073,7 +2054,6 @@ static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
struct fsf_qual_latency_info *lat_inf;
struct latency_cont *lat;
struct zfcp_unit *unit = req->unit;
- unsigned long flags;
lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
@@ -2091,11 +2071,11 @@ static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
return;
}
- spin_lock_irqsave(&unit->latencies.lock, flags);
+ spin_lock(&unit->latencies.lock);
zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
lat->counter++;
- spin_unlock_irqrestore(&unit->latencies.lock, flags);
+ spin_unlock(&unit->latencies.lock);
}
#ifdef CONFIG_BLK_DEV_IO_TRACE
@@ -2147,7 +2127,6 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
set_host_byte(scpnt, DID_SOFT_ERROR);
- set_driver_byte(scpnt, SUGGEST_RETRY);
goto skip_fsfstatus;
}
@@ -2237,12 +2216,12 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req);
+ zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCPLUN_NOT_VALID:
case FSF_LUN_HANDLE_NOT_VALID:
- zfcp_erp_port_reopen(unit->port, 0, 113, req);
+ zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -2258,7 +2237,8 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
req->qtcb->bottom.io.data_direction,
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
+ zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
+ req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_CMND_LENGTH_NOT_VALID:
@@ -2268,16 +2248,17 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
req->qtcb->bottom.io.fcp_cmnd_length,
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
+ zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
+ req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, 53, req);
+ zfcp_erp_port_boxed(unit->port, "fssfch5", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
case FSF_LUN_BOXED:
- zfcp_erp_unit_boxed(unit, 54, req);
+ zfcp_erp_unit_boxed(unit, "fssfch6", req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
@@ -2314,30 +2295,29 @@ static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
/**
* zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
- * @adapter: adapter where scsi command is issued
* @unit: unit where command is sent to
* @scsi_cmnd: scsi command to be sent
- * @timer: timer to be started when request is initiated
- * @req_flags: flags for fsf_request
*/
-int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
- struct zfcp_unit *unit,
- struct scsi_cmnd *scsi_cmnd,
- int use_timer, int req_flags)
+int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
+ struct scsi_cmnd *scsi_cmnd)
{
struct zfcp_fsf_req *req;
struct fcp_cmnd_iu *fcp_cmnd_iu;
unsigned int sbtype;
int real_bytes, retval = -EIO;
+ struct zfcp_adapter *adapter = unit->port->adapter;
if (unlikely(!(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;
spin_lock(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ if (atomic_read(&adapter->req_q.count) <= 0) {
+ atomic_inc(&adapter->qdio_outb_full);
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+ }
+ req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
+ ZFCP_REQ_AUTO_CLEANUP,
adapter->pool.fsf_req_scsi);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -2411,7 +2391,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
"on port 0x%016Lx closed\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_shutdown(unit, 0, 131, req);
+ zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
retval = -EINVAL;
}
goto failed_scsi_cmnd;
@@ -2419,9 +2399,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
- if (use_timer)
- zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
-
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
@@ -2439,28 +2416,25 @@ out:
/**
* zfcp_fsf_send_fcp_ctm - send SCSI task management command
- * @adapter: pointer to struct zfcp-adapter
* @unit: pointer to struct zfcp_unit
* @tm_flags: unsigned byte for task management flags
- * @req_flags: int request flags
* Returns: on success pointer to struct fsf_req, NULL otherwise
*/
-struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
- struct zfcp_unit *unit,
- u8 tm_flags, int req_flags)
+struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd_iu *fcp_cmnd_iu;
+ struct zfcp_adapter *adapter = unit->port->adapter;
if (unlikely(!(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return NULL;
- spin_lock(&adapter->req_q_lock);
- if (!zfcp_fsf_sbal_available(adapter))
+ spin_lock_bh(&adapter->req_q_lock);
+ if (zfcp_fsf_req_sbal_get(adapter))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+ req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0,
adapter->pool.fsf_req_scsi);
if (IS_ERR(req)) {
req = NULL;
@@ -2492,7 +2466,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
zfcp_fsf_req_free(req);
req = NULL;
out:
- spin_unlock(&adapter->req_q_lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return req;
}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 8bb200252347..df7f232faba8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -127,10 +127,6 @@
#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
-/* status subtypes in status read buffer */
-#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
-#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
-
/* status subtypes for link down */
#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 33e0a206a0a4..e0a215309df0 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -11,9 +11,6 @@
#include "zfcp_ext.h"
-/* FIXME(tune): free space should be one max. SBAL chain plus what? */
-#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
- - (FSF_MAX_SBALS_PER_REQ + 4))
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
@@ -58,7 +55,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
}
}
-static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
+static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
{
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
@@ -77,6 +74,23 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
}
}
+/* this needs to be called prior to updating the queue fill level */
+static void zfcp_qdio_account(struct zfcp_adapter *adapter)
+{
+ ktime_t now;
+ s64 span;
+ int free, used;
+
+ spin_lock(&adapter->qdio_stat_lock);
+ now = ktime_get();
+ span = ktime_us_delta(now, adapter->req_q_time);
+ free = max(0, atomic_read(&adapter->req_q.count));
+ used = QDIO_MAX_BUFFERS_PER_Q - free;
+ adapter->req_q_util += used * span;
+ adapter->req_q_time = now;
+ spin_unlock(&adapter->qdio_stat_lock);
+}
+
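The added zfcp_qdio_account() maintains a time-weighted utilization counter for the request queue: each call multiplies the number of currently used SBALs by the microseconds elapsed since the previous sample and accumulates the product in adapter->req_q_util. A minimal sketch (hypothetical helper, not part of the patch) of how an average queue depth follows from two such samples:

/* Hypothetical post-processing helper: average number of outstanding SBALs
 * between two samples of the req_q_util counter kept by zfcp_qdio_account().
 * util_* are the accumulated "used SBALs * microseconds" values, t_* the
 * corresponding timestamps in microseconds. */
static inline unsigned long long avg_sbal_depth(unsigned long long util_old,
						unsigned long long util_new,
						unsigned long long t_old,
						unsigned long long t_new)
{
	unsigned long long span = t_new - t_old;

	return span ? (util_new - util_old) / span : 0;
}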
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
@@ -86,13 +100,14 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
- zfcp_qdio_handler_error(adapter, 140);
+ zfcp_qdio_handler_error(adapter, "qdireq1");
return;
}
/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(queue->sbal, first, count);
+ zfcp_qdio_account(adapter);
atomic_add(count, &queue->count);
wake_up(&adapter->request_wq);
}
@@ -154,7 +169,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
- zfcp_qdio_handler_error(adapter, 147);
+ zfcp_qdio_handler_error(adapter, "qdires1");
return;
}
@@ -346,21 +361,12 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
struct zfcp_qdio_queue *req_q = &adapter->req_q;
int first = fsf_req->sbal_first;
int count = fsf_req->sbal_number;
- int retval, pci, pci_batch;
- struct qdio_buffer_element *sbale;
+ int retval;
+ unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
- /* acknowledgements for transferred buffers */
- pci_batch = adapter->req_q_pci_batch + count;
- if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
- pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
- pci = first + count - (pci_batch + 1);
- pci %= QDIO_MAX_BUFFERS_PER_Q;
- sbale = zfcp_qdio_sbale(req_q, pci, 0);
- sbale->flags |= SBAL_FLAGS0_PCI;
- }
+ zfcp_qdio_account(adapter);
- retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
- count);
+ retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
return retval;
@@ -370,7 +376,6 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
atomic_sub(count, &req_q->count);
req_q->first += count;
req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
- adapter->req_q_pci_batch = pci_batch;
return 0;
}
@@ -441,7 +446,6 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
}
req_q->first = 0;
atomic_set(&req_q->count, 0);
- adapter->req_q_pci_batch = 0;
adapter->resp_q.first = 0;
atomic_set(&adapter->resp_q.count, 0);
}
@@ -479,7 +483,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
/* set index of first available SBAL / number of available SBALs */
adapter->req_q.first = 0;
atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
- adapter->req_q_pci_batch = 0;
return 0;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9dc42a68fbdd..58201e1ae478 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
*/
#define KMSG_COMPONENT "zfcp"
@@ -27,9 +27,7 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
- atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
unit->device = NULL;
- zfcp_erp_unit_failed(unit, 12, NULL);
zfcp_unit_put(unit);
}
@@ -58,8 +56,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
{
struct zfcp_unit *unit;
struct zfcp_adapter *adapter;
- int status;
- int ret;
+ int status, scsi_result, ret;
+ struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
/* reset the status for this request */
scpnt->result = 0;
@@ -81,6 +79,14 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return 0;
}
+ scsi_result = fc_remote_port_chkready(rport);
+ if (unlikely(scsi_result)) {
+ scpnt->result = scsi_result;
+ zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL);
+ scpnt->scsi_done(scpnt);
+ return 0;
+ }
+
status = atomic_read(&unit->status);
if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
!(status & ZFCP_STATUS_COMMON_RUNNING))) {
@@ -88,8 +94,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return 0;
}
- ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
- ZFCP_REQ_AUTO_CLEANUP);
+ ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0))
@@ -133,8 +138,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
read_lock_irqsave(&zfcp_data.config_lock, flags);
unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
- if (unit &&
- (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_REGISTERED)) {
+ if (unit) {
sdp->hostdata = unit;
unit->device = sdp;
zfcp_unit_get(unit);
@@ -147,79 +151,91 @@ out:
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
- struct Scsi_Host *scsi_host;
- struct zfcp_adapter *adapter;
- struct zfcp_unit *unit;
- struct zfcp_fsf_req *fsf_req;
+ struct Scsi_Host *scsi_host = scpnt->device->host;
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *) scsi_host->hostdata[0];
+ struct zfcp_unit *unit = scpnt->device->hostdata;
+ struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
int retval = SUCCESS;
-
- scsi_host = scpnt->device->host;
- adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
- unit = scpnt->device->hostdata;
+ int retry = 3;
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
- /* Check whether corresponding fsf_req is still pending */
spin_lock(&adapter->req_list_lock);
- fsf_req = zfcp_reqlist_find(adapter, old_req_id);
+ old_req = zfcp_reqlist_find(adapter, old_req_id);
spin_unlock(&adapter->req_list_lock);
- if (!fsf_req) {
+ if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
- zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
- return retval;
+ zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
+ old_req_id);
+ return SUCCESS;
}
- fsf_req->data = NULL;
+ old_req->data = NULL;
/* don't access old fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
- fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
- if (!fsf_req) {
- zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
- old_req_id);
- retval = FAILED;
- return retval;
+ while (retry--) {
+ abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit);
+ if (abrt_req)
+ break;
+
+ zfcp_erp_wait(adapter);
+ if (!(atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_RUNNING)) {
+ zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
+ old_req_id);
+ return SUCCESS;
+ }
}
+ if (!abrt_req)
+ return FAILED;
- __wait_event(fsf_req->completion_wq,
- fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ wait_event(abrt_req->completion_wq,
+ abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
- zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
- } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
- zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
- } else {
- zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
+ if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
+ zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0);
+ else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
+ zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0);
+ else {
+ zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0);
retval = FAILED;
}
- zfcp_fsf_req_free(fsf_req);
-
+ zfcp_fsf_req_free(abrt_req);
return retval;
}
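Both this abort handler and the task-management path below now retry the FSF request up to three times; between attempts they wait for error recovery, and if the adapter has stopped running they report SUCCESS because the command can no longer be outstanding. A condensed sketch of that pattern (issue_fn is a hypothetical stand-in for zfcp_fsf_abort_fcp_command() or zfcp_fsf_send_fcp_ctm(); this is an illustration, not code from the patch):

/* Sketch only: the retry-with-recovery loop shared by the eh handlers.
 * Returns the issued request, or NULL if none could be allocated after
 * three attempts; *adapter_gone tells the caller to return SUCCESS. */
static struct zfcp_fsf_req *eh_issue_retry(struct zfcp_adapter *adapter,
					   struct zfcp_fsf_req *(*issue_fn)(void *),
					   void *arg, int *adapter_gone)
{
	struct zfcp_fsf_req *req = NULL;
	int retry = 3;

	*adapter_gone = 0;
	while (retry--) {
		req = issue_fn(arg);
		if (req)
			break;
		zfcp_erp_wait(adapter);		/* let recovery free SBALs */
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			*adapter_gone = 1;	/* nothing left to abort */
			return NULL;
		}
	}
	return req;
}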
-static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags,
- struct scsi_cmnd *scpnt)
+static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
+ struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_fsf_req *fsf_req;
int retval = SUCCESS;
-
- /* issue task management function */
- fsf_req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0);
- if (!fsf_req) {
- zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
- return FAILED;
+ int retry = 3;
+
+ while (retry--) {
+ fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
+ if (fsf_req)
+ break;
+
+ zfcp_erp_wait(adapter);
+ if (!(atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_RUNNING)) {
+ zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit,
+ scpnt);
+ return SUCCESS;
+ }
}
+ if (!fsf_req)
+ return FAILED;
- __wait_event(fsf_req->completion_wq,
- fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ wait_event(fsf_req->completion_wq,
+ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- /*
- * check completion status of task management function
- */
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
retval = FAILED;
@@ -230,40 +246,25 @@ static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags,
zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
zfcp_fsf_req_free(fsf_req);
-
return retval;
}
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
- struct zfcp_unit *unit = scpnt->device->hostdata;
-
- if (!unit) {
- WARN_ON(1);
- return SUCCESS;
- }
- return zfcp_task_mgmt_function(unit, FCP_LOGICAL_UNIT_RESET, scpnt);
+ return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
- struct zfcp_unit *unit = scpnt->device->hostdata;
-
- if (!unit) {
- WARN_ON(1);
- return SUCCESS;
- }
- return zfcp_task_mgmt_function(unit, FCP_TARGET_RESET, scpnt);
+ return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET);
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
- struct zfcp_unit *unit;
- struct zfcp_adapter *adapter;
+ struct zfcp_unit *unit = scpnt->device->hostdata;
+ struct zfcp_adapter *adapter = unit->port->adapter;
- unit = scpnt->device->hostdata;
- adapter = unit->port->adapter;
- zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt);
+ zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
zfcp_erp_wait(adapter);
return SUCCESS;
@@ -479,6 +480,109 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
rport->dev_loss_tmo = timeout;
}
+/**
+ * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport
+ * @rport: The rport that is about to be deleted.
+ */
+static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct zfcp_port *port = rport->dd_data;
+
+ write_lock_irq(&zfcp_data.config_lock);
+ port->rport = NULL;
+ write_unlock_irq(&zfcp_data.config_lock);
+}
+
+/**
+ * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
+ * @rport: The FC rport on which to terminate I/O
+ *
+ * Abort all pending SCSI commands for a port by closing the
+ * port. Using a reopen avoids a conflict with a shutdown
+ * overwriting a reopen.
+ */
+static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
+{
+ struct zfcp_port *port = rport->dd_data;
+
+ zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
+}
+
+static void zfcp_scsi_rport_register(struct zfcp_port *port)
+{
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+
+ ids.node_name = port->wwnn;
+ ids.port_name = port->wwpn;
+ ids.port_id = port->d_id;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+ rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+ if (!rport) {
+ dev_err(&port->adapter->ccw_device->dev,
+ "Registering port 0x%016Lx failed\n",
+ (unsigned long long)port->wwpn);
+ return;
+ }
+
+ rport->dd_data = port;
+ rport->maxframe_size = port->maxframe_size;
+ rport->supported_classes = port->supported_classes;
+ port->rport = rport;
+}
+
+static void zfcp_scsi_rport_block(struct zfcp_port *port)
+{
+ if (port->rport)
+ fc_remote_port_delete(port->rport);
+}
+
+void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
+{
+ zfcp_port_get(port);
+ port->rport_task = RPORT_ADD;
+
+ if (!queue_work(zfcp_data.work_queue, &port->rport_work))
+ zfcp_port_put(port);
+}
+
+void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
+{
+ zfcp_port_get(port);
+ port->rport_task = RPORT_DEL;
+
+ if (!queue_work(zfcp_data.work_queue, &port->rport_work))
+ zfcp_port_put(port);
+}
+
+void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
+{
+ struct zfcp_port *port;
+
+ list_for_each_entry(port, &adapter->port_list_head, list)
+ zfcp_scsi_schedule_rport_block(port);
+}
+
+void zfcp_scsi_rport_work(struct work_struct *work)
+{
+ struct zfcp_port *port = container_of(work, struct zfcp_port,
+ rport_work);
+
+ while (port->rport_task) {
+ if (port->rport_task == RPORT_ADD) {
+ port->rport_task = RPORT_NONE;
+ zfcp_scsi_rport_register(port);
+ } else {
+ port->rport_task = RPORT_NONE;
+ zfcp_scsi_rport_block(port);
+ }
+ }
+
+ zfcp_port_put(port);
+}
+
+
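The rport updates are now deferred: callers record RPORT_ADD or RPORT_DEL in port->rport_task, take a port reference and queue port->rport_work; zfcp_scsi_rport_work() then drains the task in process context, where fc_remote_port_add()/fc_remote_port_delete() may sleep. A hedged usage sketch (illustrative only, assuming the port already carries valid wwpn/wwnn/d_id values):

/* Illustrative only: how a caller would block and later re-register the
 * fc_rport of a port without sleeping in its own (possibly atomic) context. */
static void example_rport_cycle(struct zfcp_port *port)
{
	zfcp_scsi_schedule_rport_block(port);	   /* queues RPORT_DEL work */
	/* ... recovery runs, port becomes usable again ... */
	zfcp_scsi_schedule_rport_register(port);   /* queues RPORT_ADD work */
	/* fc_remote_port_delete()/_add() happen later in
	 * zfcp_scsi_rport_work(), executed on zfcp_data.work_queue. */
}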
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
@@ -497,6 +601,8 @@ struct fc_function_template zfcp_transport_functions = {
.reset_fc_host_stats = zfcp_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_get_host_port_state,
+ .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
+ .terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
/* no functions registered for following dynamic attributes but
directly set by LLDD */
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 899af2b45b1e..9a3b8e261c0a 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -112,9 +112,9 @@ static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
zfcp_sysfs_##_feat##_failed_show, \
zfcp_sysfs_##_feat##_failed_store);
-ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93);
-ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96);
-ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97);
+ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
+ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
+ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
struct device_attribute *attr,
@@ -168,7 +168,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
goto out;
}
- zfcp_erp_port_shutdown(port, 0, 92, NULL);
+ zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
zfcp_erp_wait(adapter);
zfcp_port_put(port);
zfcp_port_dequeue(port);
@@ -222,7 +222,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
retval = 0;
- zfcp_erp_unit_reopen(unit, 0, 94, NULL);
+ zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
out:
@@ -268,7 +268,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
goto out;
}
- zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
+ zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
zfcp_unit_dequeue(unit);
@@ -318,10 +318,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct zfcp_unit *unit = sdev->hostdata; \
struct zfcp_latencies *lat = &unit->latencies; \
struct zfcp_adapter *adapter = unit->port->adapter; \
- unsigned long flags; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
- spin_lock_irqsave(&lat->lock, flags); \
+ spin_lock_bh(&lat->lock); \
fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
fmin = lat->_name.fabric.min * adapter->timer_ticks; \
fmax = lat->_name.fabric.max * adapter->timer_ticks; \
@@ -329,7 +328,7 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
cmin = lat->_name.channel.min * adapter->timer_ticks; \
cmax = lat->_name.channel.max * adapter->timer_ticks; \
cc = lat->_name.counter; \
- spin_unlock_irqrestore(&lat->lock, flags); \
+ spin_unlock_bh(&lat->lock); \
\
do_div(fsum, 1000); \
do_div(fmin, 1000); \
@@ -487,7 +486,8 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
- return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full));
+ return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
+ (unsigned long long)adapter->req_q_util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
deleted file mode 100644
index 0eea90781385..000000000000
--- a/drivers/s390/sysinfo.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * drivers/s390/sysinfo.c
- *
- * Copyright IBM Corp. 2001, 2008
- * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com)
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <asm/ebcdic.h>
-#include <asm/sysinfo.h>
-#include <asm/cpcmd.h>
-
-/* Sigh, math-emu. Don't ask. */
-#include <asm/sfp-util.h>
-#include <math-emu/soft-fp.h>
-#include <math-emu/single.h>
-
-static inline int stsi_0(void)
-{
- int rc = stsi (NULL, 0, 0, 0);
- return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
-}
-
-static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
-{
- if (stsi(info, 1, 1, 1) == -ENOSYS)
- return len;
-
- EBCASC(info->manufacturer, sizeof(info->manufacturer));
- EBCASC(info->type, sizeof(info->type));
- EBCASC(info->model, sizeof(info->model));
- EBCASC(info->sequence, sizeof(info->sequence));
- EBCASC(info->plant, sizeof(info->plant));
- EBCASC(info->model_capacity, sizeof(info->model_capacity));
- EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
- EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
- len += sprintf(page + len, "Manufacturer: %-16.16s\n",
- info->manufacturer);
- len += sprintf(page + len, "Type: %-4.4s\n",
- info->type);
- if (info->model[0] != '\0')
- /*
- * Sigh: the model field has been renamed with System z9
- * to model_capacity and a new model field has been added
- * after the plant field. To avoid confusing older programs
- * the "Model:" prints "model_capacity model" or just
- * "model_capacity" if the model string is empty .
- */
- len += sprintf(page + len,
- "Model: %-16.16s %-16.16s\n",
- info->model_capacity, info->model);
- else
- len += sprintf(page + len, "Model: %-16.16s\n",
- info->model_capacity);
- len += sprintf(page + len, "Sequence Code: %-16.16s\n",
- info->sequence);
- len += sprintf(page + len, "Plant: %-4.4s\n",
- info->plant);
- len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
- info->model_capacity, *(u32 *) info->model_cap_rating);
- if (info->model_perm_cap[0] != '\0')
- len += sprintf(page + len,
- "Model Perm. Capacity: %-16.16s %08u\n",
- info->model_perm_cap,
- *(u32 *) info->model_perm_cap_rating);
- if (info->model_temp_cap[0] != '\0')
- len += sprintf(page + len,
- "Model Temp. Capacity: %-16.16s %08u\n",
- info->model_temp_cap,
- *(u32 *) info->model_temp_cap_rating);
- return len;
-}
-
-#if 0 /* Currently unused */
-static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len)
-{
- if (stsi(info, 1, 2, 1) == -ENOSYS)
- return len;
-
- len += sprintf(page + len, "\n");
- EBCASC(info->sequence, sizeof(info->sequence));
- EBCASC(info->plant, sizeof(info->plant));
- len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n",
- info->sequence);
- len += sprintf(page + len, "Plant of CPU: %-16.16s\n",
- info->plant);
- return len;
-}
-#endif
-
-static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
-{
- struct sysinfo_1_2_2_extension *ext;
- int i;
-
- if (stsi(info, 1, 2, 2) == -ENOSYS)
- return len;
- ext = (struct sysinfo_1_2_2_extension *)
- ((unsigned long) info + info->acc_offset);
-
- len += sprintf(page + len, "\n");
- len += sprintf(page + len, "CPUs Total: %d\n",
- info->cpus_total);
- len += sprintf(page + len, "CPUs Configured: %d\n",
- info->cpus_configured);
- len += sprintf(page + len, "CPUs Standby: %d\n",
- info->cpus_standby);
- len += sprintf(page + len, "CPUs Reserved: %d\n",
- info->cpus_reserved);
-
- if (info->format == 1) {
- /*
- * Sigh 2. According to the specification the alternate
- * capability field is a 32 bit floating point number
- * if the higher order 8 bits are not zero. Printing
- * a floating point number in the kernel is a no-no,
- * always print the number as 32 bit unsigned integer.
- * The user-space needs to know about the strange
- * encoding of the alternate cpu capability.
- */
- len += sprintf(page + len, "Capability: %u %u\n",
- info->capability, ext->alt_capability);
- for (i = 2; i <= info->cpus_total; i++)
- len += sprintf(page + len,
- "Adjustment %02d-way: %u %u\n",
- i, info->adjustment[i-2],
- ext->alt_adjustment[i-2]);
-
- } else {
- len += sprintf(page + len, "Capability: %u\n",
- info->capability);
- for (i = 2; i <= info->cpus_total; i++)
- len += sprintf(page + len,
- "Adjustment %02d-way: %u\n",
- i, info->adjustment[i-2]);
- }
-
- if (info->secondary_capability != 0)
- len += sprintf(page + len, "Secondary Capability: %d\n",
- info->secondary_capability);
-
- return len;
-}
-
-#if 0 /* Currently unused */
-static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
-{
- if (stsi(info, 2, 2, 1) == -ENOSYS)
- return len;
-
- len += sprintf(page + len, "\n");
- EBCASC (info->sequence, sizeof(info->sequence));
- EBCASC (info->plant, sizeof(info->plant));
- len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
- info->sequence);
- len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
- info->plant);
- return len;
-}
-#endif
-
-static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
-{
- if (stsi(info, 2, 2, 2) == -ENOSYS)
- return len;
-
- EBCASC (info->name, sizeof(info->name));
-
- len += sprintf(page + len, "\n");
- len += sprintf(page + len, "LPAR Number: %d\n",
- info->lpar_number);
-
- len += sprintf(page + len, "LPAR Characteristics: ");
- if (info->characteristics & LPAR_CHAR_DEDICATED)
- len += sprintf(page + len, "Dedicated ");
- if (info->characteristics & LPAR_CHAR_SHARED)
- len += sprintf(page + len, "Shared ");
- if (info->characteristics & LPAR_CHAR_LIMITED)
- len += sprintf(page + len, "Limited ");
- len += sprintf(page + len, "\n");
-
- len += sprintf(page + len, "LPAR Name: %-8.8s\n",
- info->name);
-
- len += sprintf(page + len, "LPAR Adjustment: %d\n",
- info->caf);
-
- len += sprintf(page + len, "LPAR CPUs Total: %d\n",
- info->cpus_total);
- len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
- info->cpus_configured);
- len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
- info->cpus_standby);
- len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
- info->cpus_reserved);
- len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
- info->cpus_dedicated);
- len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
- info->cpus_shared);
- return len;
-}
-
-static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
-{
- int i;
-
- if (stsi(info, 3, 2, 2) == -ENOSYS)
- return len;
- for (i = 0; i < info->count; i++) {
- EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
- EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
- len += sprintf(page + len, "\n");
- len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
- i, info->vm[i].name);
- len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
- i, info->vm[i].cpi);
-
- len += sprintf(page + len, "VM%02d Adjustment: %d\n",
- i, info->vm[i].caf);
-
- len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
- i, info->vm[i].cpus_total);
- len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
- i, info->vm[i].cpus_configured);
- len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
- i, info->vm[i].cpus_standby);
- len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
- i, info->vm[i].cpus_reserved);
- }
- return len;
-}
-
-
-static int proc_read_sysinfo(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- unsigned long info = get_zeroed_page (GFP_KERNEL);
- int level, len;
-
- if (!info)
- return 0;
-
- len = 0;
- level = stsi_0();
- if (level >= 1)
- len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
-
- if (level >= 1)
- len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
-
- if (level >= 2)
- len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
-
- if (level >= 3)
- len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
-
- free_page (info);
- return len;
-}
-
-static __init int create_proc_sysinfo(void)
-{
- create_proc_read_entry("sysinfo", 0444, NULL,
- proc_read_sysinfo, NULL);
- return 0;
-}
-
-__initcall(create_proc_sysinfo);
-
-/*
- * Service levels interface.
- */
-
-static DECLARE_RWSEM(service_level_sem);
-static LIST_HEAD(service_level_list);
-
-int register_service_level(struct service_level *slr)
-{
- struct service_level *ptr;
-
- down_write(&service_level_sem);
- list_for_each_entry(ptr, &service_level_list, list)
- if (ptr == slr) {
- up_write(&service_level_sem);
- return -EEXIST;
- }
- list_add_tail(&slr->list, &service_level_list);
- up_write(&service_level_sem);
- return 0;
-}
-EXPORT_SYMBOL(register_service_level);
-
-int unregister_service_level(struct service_level *slr)
-{
- struct service_level *ptr, *next;
- int rc = -ENOENT;
-
- down_write(&service_level_sem);
- list_for_each_entry_safe(ptr, next, &service_level_list, list) {
- if (ptr != slr)
- continue;
- list_del(&ptr->list);
- rc = 0;
- break;
- }
- up_write(&service_level_sem);
- return rc;
-}
-EXPORT_SYMBOL(unregister_service_level);
-
-static void *service_level_start(struct seq_file *m, loff_t *pos)
-{
- down_read(&service_level_sem);
- return seq_list_start(&service_level_list, *pos);
-}
-
-static void *service_level_next(struct seq_file *m, void *p, loff_t *pos)
-{
- return seq_list_next(p, &service_level_list, pos);
-}
-
-static void service_level_stop(struct seq_file *m, void *p)
-{
- up_read(&service_level_sem);
-}
-
-static int service_level_show(struct seq_file *m, void *p)
-{
- struct service_level *slr;
-
- slr = list_entry(p, struct service_level, list);
- slr->seq_print(m, slr);
- return 0;
-}
-
-static const struct seq_operations service_level_seq_ops = {
- .start = service_level_start,
- .next = service_level_next,
- .stop = service_level_stop,
- .show = service_level_show
-};
-
-static int service_level_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &service_level_seq_ops);
-}
-
-static const struct file_operations service_level_ops = {
- .open = service_level_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
-
-static void service_level_vm_print(struct seq_file *m,
- struct service_level *slr)
-{
- char *query_buffer, *str;
-
- query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA);
- if (!query_buffer)
- return;
- cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
- str = strchr(query_buffer, '\n');
- if (str)
- *str = 0;
- seq_printf(m, "VM: %s\n", query_buffer);
- kfree(query_buffer);
-}
-
-static struct service_level service_level_vm = {
- .seq_print = service_level_vm_print
-};
-
-static __init int create_proc_service_level(void)
-{
- proc_create("service_levels", 0, NULL, &service_level_ops);
- if (MACHINE_IS_VM)
- register_service_level(&service_level_vm);
- return 0;
-}
-
-subsys_initcall(create_proc_service_level);
-
-/*
- * Bogomips calculation based on cpu capability.
- */
-
-int get_cpu_capability(unsigned int *capability)
-{
- struct sysinfo_1_2_2 *info;
- int rc;
-
- info = (void *) get_zeroed_page(GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- rc = stsi(info, 1, 2, 2);
- if (rc == -ENOSYS)
- goto out;
- rc = 0;
- *capability = info->capability;
-out:
- free_page((unsigned long) info);
- return rc;
-}
-
-/*
- * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
- */
-void s390_adjust_jiffies(void)
-{
- struct sysinfo_1_2_2 *info;
- const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- unsigned int capability;
-
- info = (void *) get_zeroed_page(GFP_KERNEL);
- if (!info)
- return;
-
- if (stsi(info, 1, 2, 2) != -ENOSYS) {
- /*
- * Major sigh. The cpu capability encoding is "special".
- * If the first 9 bits of info->capability are 0 then it
- * is a 32 bit unsigned integer in the range 0 .. 2^23.
- * If the first 9 bits are != 0 then it is a 32 bit float.
- * In addition a lower value indicates a proportionally
- * higher cpu capacity. Bogomips are the other way round.
- * To get to a halfway suitable number we divide 1e7
- * by the cpu capability number. Yes, that means a floating
- * point division .. math-emu here we come :-)
- */
- FP_UNPACK_SP(SA, &fmil);
- if ((info->capability >> 23) == 0)
- FP_FROM_INT_S(SB, info->capability, 32, int);
- else
- FP_UNPACK_SP(SB, &info->capability);
- FP_DIV_S(SR, SA, SB);
- FP_TO_INT_S(capability, SR, 32, 0);
- } else
- /*
- * Really old machine without stsi block for basic
- * cpu information. Report 42.0 bogomips.
- */
- capability = 42;
- loops_per_jiffy = capability * (500000/HZ);
- free_page((unsigned long) info);
-}
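For reference, the deleted calculation derives BogoMIPS as roughly 1e7 divided by the STSI capability value (a smaller capability number means a faster CPU), falling back to a 42.0 preset when no STSI data is available; loops_per_jiffy is then that quotient times 500000/HZ. An integer-only sketch of the same arithmetic for the common case where the capability field is a plain integer (top 9 bits zero); this is an illustration, not the soft-float code being removed:

/* Hypothetical integer-only variant of the deleted conversion. */
static unsigned long bogomips_from_capability(unsigned int capability)
{
	if (!capability)
		return 42;			/* no usable STSI data: preset 42.0 BogoMIPS */
	return 10000000UL / capability;		/* lower capability => faster CPU */
}

/* loops_per_jiffy = bogomips_from_capability(cap) * (500000 / HZ); */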
-
-/*
- * calibrate the delay loop
- */
-void __cpuinit calibrate_delay(void)
-{
- s390_adjust_jiffies();
- /* Print the good old Bogomips line .. */
- printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
- "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100);
-}