Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/Kconfig | 2
-rw-r--r--  drivers/s390/block/dasd.c | 53
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 17
-rw-r--r--  drivers/s390/block/dasd_alias.c | 13
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 43
-rw-r--r--  drivers/s390/block/dasd_diag.c | 1
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 27
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 1
-rw-r--r--  drivers/s390/block/dasd_proc.c | 17
-rw-r--r--  drivers/s390/char/Makefile | 5
-rw-r--r--  drivers/s390/char/defkeymap.c | 66
-rw-r--r--  drivers/s390/char/fs3270.c | 1
-rw-r--r--  drivers/s390/char/keyboard.c | 32
-rw-r--r--  drivers/s390/char/keyboard.h | 11
-rw-r--r--  drivers/s390/char/sclp.c | 58
-rw-r--r--  drivers/s390/char/sclp.h | 61
-rw-r--r--  drivers/s390/char/sclp_ctl.c | 1
-rw-r--r--  drivers/s390/char/sclp_early.c | 2
-rw-r--r--  drivers/s390/char/sclp_early_core.c | 38
-rw-r--r--  drivers/s390/char/sclp_sd.c | 569
-rw-r--r--  drivers/s390/char/sclp_tty.c | 5
-rw-r--r--  drivers/s390/char/tape_proc.c | 19
-rw-r--r--  drivers/s390/char/vmcp.c | 1
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 14
-rw-r--r--  drivers/s390/cio/chp.c | 34
-rw-r--r--  drivers/s390/cio/chp.h | 5
-rw-r--r--  drivers/s390/cio/chsc.c | 73
-rw-r--r--  drivers/s390/cio/chsc.h | 11
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 1
-rw-r--r--  drivers/s390/cio/cio.c | 257
-rw-r--r--  drivers/s390/cio/device.c | 16
-rw-r--r--  drivers/s390/cio/device_ops.c | 35
-rw-r--r--  drivers/s390/cio/ioasm.c | 24
-rw-r--r--  drivers/s390/cio/ioasm.h | 1
-rw-r--r--  drivers/s390/cio/qdio_main.c | 135
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 14
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 13
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c | 24
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 32
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 22
-rw-r--r--  drivers/s390/crypto/ap_debug.h | 3
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 41
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 486
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 26
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 51
-rw-r--r--  drivers/s390/net/lcs.c | 3
-rw-r--r--  drivers/s390/net/qeth_core.h | 63
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 310
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 14
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 158
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 207
-rw-r--r--  drivers/s390/net/smsgiucv.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 5
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 14
56 files changed, 1682 insertions, 1480 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 1444333210c7..9ac7574e3cfb 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -15,8 +15,8 @@ config BLK_DEV_XPRAM
config DCSSBLK
def_tristate m
- select DAX
select FS_DAX_LIMITED
+ select DAX_DRIVER
prompt "DCSSBLK support"
depends on S390 && BLOCK
help
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index b5692a284bd8..73cce3ecb97f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
* Cancellation of a request is an asynchronous operation! The calling
* function has to wait until the request is properly returned via callback.
*/
-int dasd_cancel_req(struct dasd_ccw_req *cqr)
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
struct dasd_device *device = cqr->startdev;
- unsigned long flags;
- int rc;
+ int rc = 0;
- rc = 0;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
switch (cqr->status) {
case DASD_CQR_QUEUED:
/* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
default: /* already finished or clear pending - do nothing */
break;
}
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_device_bh(device);
return rc;
}
-EXPORT_SYMBOL(dasd_cancel_req);
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device = cqr->startdev;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ rc = __dasd_cancel_req(cqr);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return rc;
+}
/*
* SECTION: Operations of the dasd_block layer.
@@ -3034,7 +3041,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
cqr->callback_data = req;
cqr->status = DASD_CQR_FILLED;
cqr->dq = dq;
- req->completion_data = cqr;
+ *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
+
blk_mq_start_request(req);
spin_lock(&block->queue_lock);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
@@ -3053,19 +3061,20 @@ out:
*
* Return values:
* BLK_EH_RESET_TIMER if the request should be left running
- * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ * BLK_EH_DONE if the request is handled or terminated
* by the driver.
*/
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
- struct dasd_ccw_req *cqr = req->completion_data;
struct dasd_block *block = req->q->queuedata;
struct dasd_device *device;
+ struct dasd_ccw_req *cqr;
unsigned long flags;
int rc = 0;
+ cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
if (!cqr)
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
spin_lock_irqsave(&cqr->dq->lock, flags);
device = cqr->startdev ? cqr->startdev : block->base;
@@ -3082,12 +3091,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
cqr->retries = -1;
cqr->intrc = -ETIMEDOUT;
if (cqr->status >= DASD_CQR_QUEUED) {
- spin_unlock(get_ccwdev_lock(device->cdev));
- rc = dasd_cancel_req(cqr);
+ rc = __dasd_cancel_req(cqr);
} else if (cqr->status == DASD_CQR_FILLED ||
cqr->status == DASD_CQR_NEED_ERP) {
cqr->status = DASD_CQR_TERMINATED;
- spin_unlock(get_ccwdev_lock(device->cdev));
} else if (cqr->status == DASD_CQR_IN_ERP) {
struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
@@ -3102,9 +3109,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
searchcqr->retries = -1;
searchcqr->intrc = -ETIMEDOUT;
if (searchcqr->status >= DASD_CQR_QUEUED) {
- spin_unlock(get_ccwdev_lock(device->cdev));
- rc = dasd_cancel_req(searchcqr);
- spin_lock(get_ccwdev_lock(device->cdev));
+ rc = __dasd_cancel_req(searchcqr);
} else if ((searchcqr->status == DASD_CQR_FILLED) ||
(searchcqr->status == DASD_CQR_NEED_ERP)) {
searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,13 +3123,13 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
}
break;
}
- spin_unlock(get_ccwdev_lock(device->cdev));
}
+ spin_unlock(get_ccwdev_lock(device->cdev));
dasd_schedule_block_bh(block);
spin_unlock(&block->queue_lock);
spin_unlock_irqrestore(&cqr->dq->lock, flags);
- return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+ return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -3169,6 +3174,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
int rc;
block->tag_set.ops = &dasd_mq_ops;
+ block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -3918,8 +3924,13 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
cqr = refers;
}
- if (cqr->block)
- list_del_init(&cqr->blocklist);
+ /*
+ * _dasd_requeue_request already checked for a valid
+ * blockdevice, no need to check again
+ * all erp requests (cqr->refers) have a cqr->block
+ * pointer copy from the original cqr
+ */
+ list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee14d8e45c97..ee73b0607e47 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2214,15 +2214,28 @@ static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
{
int pos = pathmask_to_pos(lpum);
+ if (!(device->features & DASD_FEATURE_PATH_AUTODISABLE)) {
+ dev_err(&device->cdev->dev,
+ "Path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ goto out;
+ }
+
/* no remaining path, cannot disable */
- if (!(dasd_path_get_opm(device) & ~lpum))
- return;
+ if (!(dasd_path_get_opm(device) & ~lpum)) {
+ dev_err(&device->cdev->dev,
+ "Last path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ goto out;
+ }
dev_err(&device->cdev->dev,
"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
device->path[pos].cssid, device->path[pos].chpid, lpum);
dasd_path_remove_opm(device, lpum);
dasd_path_add_ifccpm(device, lpum);
+
+out:
device->path[pos].errorclk = 0;
atomic_set(&device->path[pos].error_count, 0);
}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct alias_lcu *lcu;
+ __u8 uaddr = private->uid.real_unit_addr;
+ struct alias_lcu *lcu = private->lcu;
unsigned long flags;
int rc;
- lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * Check if device and lcu type differ. If so, the uac data may be
+ * outdated and needs to be updated.
+ */
+ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+ lcu->flags |= UPDATE_PENDING;
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "uid type mismatch - trigger rescan");
+ }
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index e7cd28ff1984..b9ebb565ee2c 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1550,9 +1550,49 @@ dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
dasd_put_device(device);
return count;
}
-
static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
dasd_path_threshold_store);
+
+/*
+ * configure if path is disabled after IFCC/CCC error threshold is
+ * exceeded
+ */
+static ssize_t
+dasd_path_autodisable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int flag;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ flag = (devmap->features & DASD_FEATURE_PATH_AUTODISABLE) != 0;
+ else
+ flag = (DASD_FEATURE_DEFAULT &
+ DASD_FEATURE_PATH_AUTODISABLE) != 0;
+ return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_path_autodisable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ if (kstrtouint(buf, 0, &val) || val > 1)
+ return -EINVAL;
+
+ rc = dasd_set_feature(to_ccwdev(dev),
+ DASD_FEATURE_PATH_AUTODISABLE, val);
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(path_autodisable, 0644,
+ dasd_path_autodisable_show,
+ dasd_path_autodisable_store);
/*
* interval for IFCC/CCC checks
* meaning time with no IFCC/CCC error before the error counter
@@ -1623,6 +1663,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
&dev_attr_path_threshold.attr,
+ &dev_attr_path_autodisable.attr,
&dev_attr_path_interval.attr,
&dev_attr_path_reset.attr,
&dev_attr_hpf.attr,
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f035c2f25d35..131f1989f6f3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -27,7 +27,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
-#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 29397a9dba68..be208e7adcb4 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -214,24 +214,25 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
geo->head |= head;
}
-static int check_XRC(struct ccw1 *ccw, struct DE_eckd_data *data,
+static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
- if (!private->rdc_data.facilities.XRC_supported)
+ rc = get_phys_clock(&data->ep_sys_time);
+ /*
+ * Ignore return code if XRC is not supported or
+ * sync clock is switched off
+ */
+ if ((rc && !private->rdc_data.facilities.XRC_supported) ||
+ rc == -EOPNOTSUPP || rc == -EACCES)
return 0;
/* switch on System Time Stamp - needed for XRC Support */
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
- rc = get_phys_clock(&data->ep_sys_time);
- /* Ignore return code if sync clock is switched off. */
- if (rc == -EOPNOTSUPP || rc == -EACCES)
- rc = 0;
-
if (ccw) {
ccw->count = sizeof(struct DE_eckd_data);
ccw->flags |= CCW_FLAG_SLI;
@@ -286,12 +287,12 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
case DASD_ECKD_CCW_WRITE_KD_MT:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
- rc = check_XRC(ccw, data, device);
+ rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC(ccw, data, device);
+ rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_ERASE:
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -299,7 +300,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
data->mask.perm = 0x3;
data->mask.auth = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC(ccw, data, device);
+ rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_FULL_TRACK:
data->mask.perm = 0x03;
@@ -310,7 +311,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
data->blk_size = blksize;
- rc = check_XRC(ccw, data, device);
+ rc = set_timestamp(ccw, data, device);
break;
default:
dev_err(&device->cdev->dev,
@@ -993,7 +994,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
struct dasd_eckd_private *private, path_private;
struct dasd_uid *uid;
char print_path_uid[60], print_device_uid[60];
- struct channel_path_desc *chp_desc;
+ struct channel_path_desc_fmt0 *chp_desc;
struct subchannel_id sch_id;
private = device->private;
@@ -3440,7 +3441,7 @@ static int prepare_itcw(struct itcw *itcw,
dedata->mask.perm = 0x02;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
- rc = check_XRC(NULL, dedata, basedev);
+ rc = set_timestamp(NULL, dedata, basedev);
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x3F;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 7bdc6aaa0ba3..2016e0ed5865 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -18,7 +18,6 @@
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
-#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/schid.h>
#include <asm/cmb.h>
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index c33788a829c3..5cb80c645489 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -131,19 +131,6 @@ static const struct seq_operations dasd_devices_seq_ops = {
.show = dasd_devices_show,
};
-static int dasd_devices_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &dasd_devices_seq_ops);
-}
-
-static const struct file_operations dasd_devices_file_ops = {
- .owner = THIS_MODULE,
- .open = dasd_devices_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
#ifdef CONFIG_DASD_PROFILE
static int dasd_stats_all_block_on(void)
{
@@ -352,10 +339,10 @@ dasd_proc_init(void)
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
if (!dasd_proc_root_entry)
goto out_nodasd;
- dasd_devices_entry = proc_create("devices",
+ dasd_devices_entry = proc_create_seq("devices",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
- &dasd_devices_file_ops);
+ &dasd_devices_seq_ops);
if (!dasd_devices_entry)
goto out_nodevices;
dasd_statistics_entry = proc_create("statistics",
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index a2b33a22c82a..0a4c13e1e76e 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -23,7 +23,7 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
- sclp_early.o sclp_early_core.o
+ sclp_early.o sclp_early_core.o sclp_sd.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -54,3 +54,6 @@ obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o
+
+chkbss := sclp_early_core.o
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 98a5c459a1bf..60845d467a1b 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -9,7 +9,9 @@
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
-u_short plain_map[NR_KEYS] = {
+#include "keyboard.h"
+
+u_short ebc_plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
@@ -85,12 +87,12 @@ static u_short shift_ctrl_map[NR_KEYS] = {
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-ushort *key_maps[MAX_NR_KEYMAPS] = {
- plain_map, shift_map, NULL, NULL,
+ushort *ebc_key_maps[MAX_NR_KEYMAPS] = {
+ ebc_plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
-unsigned int keymap_count = 4;
+unsigned int ebc_keymap_count = 4;
/*
@@ -99,7 +101,7 @@ unsigned int keymap_count = 4;
* the default and allocate dynamically in chunks of 512 bytes.
*/
-char func_buf[] = {
+char ebc_func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
@@ -123,37 +125,37 @@ char func_buf[] = {
};
-char *funcbufptr = func_buf;
-int funcbufsize = sizeof(func_buf);
-int funcbufleft = 0; /* space left */
-
-char *func_table[MAX_NR_FUNC] = {
- func_buf + 0,
- func_buf + 5,
- func_buf + 10,
- func_buf + 15,
- func_buf + 20,
- func_buf + 25,
- func_buf + 31,
- func_buf + 37,
- func_buf + 43,
- func_buf + 49,
- func_buf + 55,
- func_buf + 61,
- func_buf + 67,
- func_buf + 73,
- func_buf + 79,
- func_buf + 85,
- func_buf + 91,
- func_buf + 97,
- func_buf + 103,
- func_buf + 109,
+char *ebc_funcbufptr = ebc_func_buf;
+int ebc_funcbufsize = sizeof(ebc_func_buf);
+int ebc_funcbufleft; /* space left */
+
+char *ebc_func_table[MAX_NR_FUNC] = {
+ ebc_func_buf + 0,
+ ebc_func_buf + 5,
+ ebc_func_buf + 10,
+ ebc_func_buf + 15,
+ ebc_func_buf + 20,
+ ebc_func_buf + 25,
+ ebc_func_buf + 31,
+ ebc_func_buf + 37,
+ ebc_func_buf + 43,
+ ebc_func_buf + 49,
+ ebc_func_buf + 55,
+ ebc_func_buf + 61,
+ ebc_func_buf + 67,
+ ebc_func_buf + 73,
+ ebc_func_buf + 79,
+ ebc_func_buf + 85,
+ ebc_func_buf + 91,
+ ebc_func_buf + 97,
+ ebc_func_buf + 103,
+ ebc_func_buf + 109,
NULL,
};
-struct kbdiacruc accent_table[MAX_DIACR] = {
+struct kbdiacruc ebc_accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
-unsigned int accent_table_size = 4;
+unsigned int ebc_accent_table_size = 4;
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 61822480a2a0..16a4e8528bbc 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 5b505fdaedec..db1fbf9b00b5 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -54,24 +54,24 @@ kbd_alloc(void) {
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
goto out;
- kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
+ kbd->key_maps = kzalloc(sizeof(ebc_key_maps), GFP_KERNEL);
if (!kbd->key_maps)
goto out_kbd;
- for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
- if (key_maps[i]) {
- kbd->key_maps[i] = kmemdup(key_maps[i],
+ for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
+ if (ebc_key_maps[i]) {
+ kbd->key_maps[i] = kmemdup(ebc_key_maps[i],
sizeof(u_short) * NR_KEYS,
GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
}
}
- kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
+ kbd->func_table = kzalloc(sizeof(ebc_func_table), GFP_KERNEL);
if (!kbd->func_table)
goto out_maps;
- for (i = 0; i < ARRAY_SIZE(func_table); i++) {
- if (func_table[i]) {
- kbd->func_table[i] = kstrdup(func_table[i],
+ for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) {
+ if (ebc_func_table[i]) {
+ kbd->func_table[i] = kstrdup(ebc_func_table[i],
GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
@@ -81,22 +81,22 @@ kbd_alloc(void) {
kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
- kbd->accent_table = kmemdup(accent_table,
+ kbd->accent_table = kmemdup(ebc_accent_table,
sizeof(struct kbdiacruc) * MAX_DIACR,
GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
- kbd->accent_table_size = accent_table_size;
+ kbd->accent_table_size = ebc_accent_table_size;
return kbd;
out_fn_handler:
kfree(kbd->fn_handler);
out_func:
- for (i = 0; i < ARRAY_SIZE(func_table); i++)
+ for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
out_maps:
- for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+ for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
out_kbd:
@@ -112,10 +112,10 @@ kbd_free(struct kbd_data *kbd)
kfree(kbd->accent_table);
kfree(kbd->fn_handler);
- for (i = 0; i < ARRAY_SIZE(func_table); i++)
+ for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
- for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+ for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
kfree(kbd);
@@ -131,7 +131,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
int i, j, k;
memset(ascebc, 0x40, 256);
- for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+ for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
@@ -158,7 +158,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
int i, j, k;
memset(ebcasc, ' ', 256);
- for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+ for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index a074d9711628..c467589c7f45 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -14,6 +14,17 @@
struct kbd_data;
+extern int ebc_funcbufsize, ebc_funcbufleft;
+extern char *ebc_func_table[MAX_NR_FUNC];
+extern char ebc_func_buf[];
+extern char *ebc_funcbufptr;
+extern unsigned int ebc_keymap_count;
+
+extern struct kbdiacruc ebc_accent_table[];
+extern unsigned int ebc_accent_table_size;
+extern unsigned short *ebc_key_maps[MAX_NR_KEYMAPS];
+extern unsigned short ebc_plain_map[NR_KEYS];
+
typedef void (fn_handler_fn)(struct kbd_data *);
/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index e4e2df7a478e..e9aa71cdfc44 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -417,7 +417,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
reg = NULL;
list_for_each(l, &sclp_reg_list) {
reg = list_entry(l, struct sclp_register, list);
- if (reg->receive_mask & (1 << (32 - evbuf->type)))
+ if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
break;
else
reg = NULL;
@@ -618,9 +618,12 @@ struct sclp_statechangebuf {
u16 _zeros : 12;
u16 mask_length;
u64 sclp_active_facility_mask;
- sccb_mask_t sclp_receive_mask;
- sccb_mask_t sclp_send_mask;
- u32 read_data_function_mask;
+ u8 masks[2 * 1021 + 4]; /* variable length */
+ /*
+ * u8 sclp_receive_mask[mask_length];
+ * u8 sclp_send_mask[mask_length];
+ * u32 read_data_function_mask;
+ */
} __attribute__((packed));
@@ -631,14 +634,14 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
unsigned long flags;
struct sclp_statechangebuf *scbuf;
+ BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);
+
scbuf = (struct sclp_statechangebuf *) evbuf;
- if (scbuf->mask_length != sizeof(sccb_mask_t))
- return;
spin_lock_irqsave(&sclp_lock, flags);
if (scbuf->validity_sclp_receive_mask)
- sclp_receive_mask = scbuf->sclp_receive_mask;
+ sclp_receive_mask = sccb_get_recv_mask(scbuf);
if (scbuf->validity_sclp_send_mask)
- sclp_send_mask = scbuf->sclp_send_mask;
+ sclp_send_mask = sccb_get_send_mask(scbuf);
spin_unlock_irqrestore(&sclp_lock, flags);
if (scbuf->validity_sclp_active_facility_mask)
sclp.facilities = scbuf->sclp_active_facility_mask;
@@ -748,7 +751,7 @@ EXPORT_SYMBOL(sclp_remove_processed);
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
-__sclp_make_init_req(u32 receive_mask, u32 send_mask)
+__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
struct init_sccb *sccb;
@@ -761,12 +764,15 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
sclp_init_req.callback = NULL;
sclp_init_req.callback_data = NULL;
sclp_init_req.sccb = sccb;
- sccb->header.length = sizeof(struct init_sccb);
- sccb->mask_length = sizeof(sccb_mask_t);
- sccb->receive_mask = receive_mask;
- sccb->send_mask = send_mask;
- sccb->sclp_receive_mask = 0;
- sccb->sclp_send_mask = 0;
+ sccb->header.length = sizeof(*sccb);
+ if (sclp_mask_compat_mode)
+ sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
+ else
+ sccb->mask_length = sizeof(sccb_mask_t);
+ sccb_set_recv_mask(sccb, receive_mask);
+ sccb_set_send_mask(sccb, send_mask);
+ sccb_set_sclp_recv_mask(sccb, 0);
+ sccb_set_sclp_send_mask(sccb, 0);
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
@@ -822,8 +828,8 @@ sclp_init_mask(int calculate)
sccb->header.response_code == 0x20) {
/* Successful request */
if (calculate) {
- sclp_receive_mask = sccb->sclp_receive_mask;
- sclp_send_mask = sccb->sclp_send_mask;
+ sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
+ sclp_send_mask = sccb_get_sclp_send_mask(sccb);
} else {
sclp_receive_mask = 0;
sclp_send_mask = 0;
@@ -974,12 +980,18 @@ sclp_check_interface(void)
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
- if (sclp_init_req.status == SCLP_REQ_DONE &&
- sccb->header.response_code == 0x20) {
- rc = 0;
- break;
- } else
- rc = -EBUSY;
+ rc = -EBUSY;
+ if (sclp_init_req.status == SCLP_REQ_DONE) {
+ if (sccb->header.response_code == 0x20) {
+ rc = 0;
+ break;
+ } else if (sccb->header.response_code == 0x74f0) {
+ if (!sclp_mask_compat_mode) {
+ sclp_mask_compat_mode = true;
+ retry = 0;
+ }
+ }
+ }
}
unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index f41f6e2ca063..1fe4918088e7 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -18,7 +18,7 @@
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define SCLP_CONSOLE_PAGES 6
-#define SCLP_EVTYP_MASK(T) (1U << (32 - (T)))
+#define SCLP_EVTYP_MASK(T) (1UL << (sizeof(sccb_mask_t) * BITS_PER_BYTE - (T)))
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
@@ -28,6 +28,7 @@
#define EVTYP_PMSGCMD 0x09
#define EVTYP_ASYNC 0x0A
#define EVTYP_CTLPROGIDENT 0x0B
+#define EVTYP_STORE_DATA 0x0C
#define EVTYP_ERRNOTIFY 0x18
#define EVTYP_VT220MSG 0x1A
#define EVTYP_SDIAS 0x1C
@@ -42,6 +43,7 @@
#define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
#define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC)
#define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
+#define EVTYP_STORE_DATA_MASK SCLP_EVTYP_MASK(EVTYP_STORE_DATA)
#define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
#define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG)
#define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS)
@@ -85,7 +87,7 @@ enum sclp_pm_event {
#define SCLP_PANIC_PRIO 1
#define SCLP_PANIC_PRIO_CLIENT 0
-typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
+typedef u64 sccb_mask_t;
struct sccb_header {
u16 length;
@@ -98,12 +100,53 @@ struct init_sccb {
struct sccb_header header;
u16 _reserved;
u16 mask_length;
- sccb_mask_t receive_mask;
- sccb_mask_t send_mask;
- sccb_mask_t sclp_receive_mask;
- sccb_mask_t sclp_send_mask;
+ u8 masks[4 * 1021]; /* variable length */
+ /*
+ * u8 receive_mask[mask_length];
+ * u8 send_mask[mask_length];
+ * u8 sclp_receive_mask[mask_length];
+ * u8 sclp_send_mask[mask_length];
+ */
} __attribute__((packed));
+#define SCLP_MASK_SIZE_COMPAT 4
+
+static inline sccb_mask_t sccb_get_mask(u8 *masks, size_t len, int i)
+{
+ sccb_mask_t res = 0;
+
+ memcpy(&res, masks + i * len, min(sizeof(res), len));
+ return res;
+}
+
+static inline void sccb_set_mask(u8 *masks, size_t len, int i, sccb_mask_t val)
+{
+ memset(masks + i * len, 0, len);
+ memcpy(masks + i * len, &val, min(sizeof(val), len));
+}
+
+#define sccb_get_generic_mask(sccb, i) \
+({ \
+ __typeof__(sccb) __sccb = sccb; \
+ \
+ sccb_get_mask(__sccb->masks, __sccb->mask_length, i); \
+})
+#define sccb_get_recv_mask(sccb) sccb_get_generic_mask(sccb, 0)
+#define sccb_get_send_mask(sccb) sccb_get_generic_mask(sccb, 1)
+#define sccb_get_sclp_recv_mask(sccb) sccb_get_generic_mask(sccb, 2)
+#define sccb_get_sclp_send_mask(sccb) sccb_get_generic_mask(sccb, 3)
+
+#define sccb_set_generic_mask(sccb, i, val) \
+({ \
+ __typeof__(sccb) __sccb = sccb; \
+ \
+ sccb_set_mask(__sccb->masks, __sccb->mask_length, i, val); \
+})
+#define sccb_set_recv_mask(sccb, val) sccb_set_generic_mask(sccb, 0, val)
+#define sccb_set_send_mask(sccb, val) sccb_set_generic_mask(sccb, 1, val)
+#define sccb_set_sclp_recv_mask(sccb, val) sccb_set_generic_mask(sccb, 2, val)
+#define sccb_set_sclp_send_mask(sccb, val) sccb_set_generic_mask(sccb, 3, val)
+
struct read_cpu_info_sccb {
struct sccb_header header;
u16 nr_configured;
@@ -221,15 +264,17 @@ extern int sclp_init_state;
extern int sclp_console_pages;
extern int sclp_console_drop;
extern unsigned long sclp_console_full;
+extern bool sclp_mask_compat_mode;
extern char sclp_early_sccb[PAGE_SIZE];
void sclp_early_wait_irq(void);
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
+unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb);
int sclp_early_set_event_mask(struct init_sccb *sccb,
- unsigned long receive_mask,
- unsigned long send_mask);
+ sccb_mask_t receive_mask,
+ sccb_mask_t send_mask);
/* useful inlines */
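
The sclp.h hunks above widen sccb_mask_t to 64 bits and switch to length-aware mask accessors. A small stand-alone C sketch (hypothetical values, not part of the patch) of why truncating to the 4-byte compat length still carries the defined event types, assuming the big-endian byte order of s390, where bit 1 of SCLP_EVTYP_MASK() is the most significant bit of the first byte:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned long long sccb_mask_t;             /* 64 bit, as in the patch */
    #define SCLP_EVTYP_MASK(T)      (1ULL << (64 - (T)))
    #define EVTYP_OPCMD             0x01
    #define SCLP_MASK_SIZE_COMPAT   4

    int main(void)
    {
            unsigned char masks[8] = { 0 };
            sccb_mask_t val = SCLP_EVTYP_MASK(EVTYP_OPCMD);

            /* compat mode stores only the first SCLP_MASK_SIZE_COMPAT bytes */
            memcpy(masks, &val, SCLP_MASK_SIZE_COMPAT);
            printf("0x%02x\n", masks[0]);   /* 0x80 on big-endian: event type 1 kept */
            return 0;
    }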
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index a78cea0c3a09..248b5db3eaa8 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
-#include <asm/compat.h>
#include <asm/sclp_ctl.h>
#include <asm/sclp.h>
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 6b1891539c84..9a74abb9224d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -249,7 +249,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
if (sccb->header.response_code != 0x20)
return;
- if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+ if (sclp_early_con_check_vt220(sccb))
sclp.has_vt220 = 1;
if (sclp_early_con_check_linemode(sccb))
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 17b0c67f3e8d..eceba3858cef 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -14,6 +14,11 @@
char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
+/*
+ * Used to keep track of the size of the event masks. Qemu until version 2.11
+ * only supports 4 and needs a workaround.
+ */
+bool sclp_mask_compat_mode __section(.data);
void sclp_early_wait_irq(void)
{
@@ -142,16 +147,24 @@ static void sclp_early_print_vt220(const char *str, unsigned int len)
}
int sclp_early_set_event_mask(struct init_sccb *sccb,
- unsigned long receive_mask,
- unsigned long send_mask)
+ sccb_mask_t receive_mask,
+ sccb_mask_t send_mask)
{
+retry:
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
- sccb->mask_length = sizeof(sccb_mask_t);
- sccb->receive_mask = receive_mask;
- sccb->send_mask = send_mask;
+ if (sclp_mask_compat_mode)
+ sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
+ else
+ sccb->mask_length = sizeof(sccb_mask_t);
+ sccb_set_recv_mask(sccb, receive_mask);
+ sccb_set_send_mask(sccb, send_mask);
if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
return -EIO;
+ if ((sccb->header.response_code == 0x74f0) && !sclp_mask_compat_mode) {
+ sclp_mask_compat_mode = true;
+ goto retry;
+ }
if (sccb->header.response_code != 0x20)
return -EIO;
return 0;
@@ -159,19 +172,28 @@ int sclp_early_set_event_mask(struct init_sccb *sccb,
unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
{
- if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
+ if (!(sccb_get_sclp_send_mask(sccb) & EVTYP_OPCMD_MASK))
return 0;
- if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+ if (!(sccb_get_sclp_recv_mask(sccb) & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
}
+unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb)
+{
+ if (sccb_get_sclp_send_mask(sccb) & EVTYP_VT220MSG_MASK)
+ return 1;
+ return 0;
+}
+
static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
{
unsigned long receive_mask, send_mask;
struct init_sccb *sccb;
int rc;
+ BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
+
*have_linemode = *have_vt220 = 0;
sccb = (struct init_sccb *) &sclp_early_sccb;
receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
@@ -180,7 +202,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
if (rc)
return rc;
*have_linemode = sclp_early_con_check_linemode(sccb);
- *have_vt220 = sccb->send_mask & EVTYP_VT220MSG_MASK;
+ *have_vt220 = !!(sccb_get_send_mask(sccb) & EVTYP_VT220MSG_MASK);
return rc;
}
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
new file mode 100644
index 000000000000..99f41db5123b
--- /dev/null
+++ b/drivers/s390/char/sclp_sd.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP Store Data support and sysfs interface
+ *
+ * Copyright IBM Corp. 2017
+ */
+
+#define KMSG_COMPONENT "sclp_sd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/async.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <asm/pgalloc.h>
+
+#include "sclp.h"
+
+#define SD_EQ_STORE_DATA 0
+#define SD_EQ_HALT 1
+#define SD_EQ_SIZE 2
+
+#define SD_DI_CONFIG 3
+
+struct sclp_sd_evbuf {
+ struct evbuf_header hdr;
+ u8 eq;
+ u8 di;
+ u8 rflags;
+ u64 :56;
+ u32 id;
+ u16 :16;
+ u8 fmt;
+ u8 status;
+ u64 sat;
+ u64 sa;
+ u32 esize;
+ u32 dsize;
+} __packed;
+
+struct sclp_sd_sccb {
+ struct sccb_header hdr;
+ struct sclp_sd_evbuf evbuf;
+} __packed __aligned(PAGE_SIZE);
+
+/**
+ * struct sclp_sd_data - Result of a Store Data request
+ * @esize_bytes: Resulting esize in bytes
+ * @dsize_bytes: Resulting dsize in bytes
+ * @data: Pointer to data - must be released using vfree()
+ */
+struct sclp_sd_data {
+ size_t esize_bytes;
+ size_t dsize_bytes;
+ void *data;
+};
+
+/**
+ * struct sclp_sd_listener - Listener for asynchronous Store Data response
+ * @list: For enqueueing this struct
+ * @id: Event ID of response to listen for
+ * @completion: Can be used to wait for response
+ * @evbuf: Contains the resulting Store Data response after completion
+ */
+struct sclp_sd_listener {
+ struct list_head list;
+ u32 id;
+ struct completion completion;
+ struct sclp_sd_evbuf evbuf;
+};
+
+/**
+ * struct sclp_sd_file - Sysfs representation of a Store Data entity
+ * @kobj: Kobject
+ * @data_attr: Attribute for accessing data contents
+ * @data_mutex: Mutex to serialize access and updates to @data
+ * @data: Data associated with this entity
+ * @di: DI value associated with this entity
+ */
+struct sclp_sd_file {
+ struct kobject kobj;
+ struct bin_attribute data_attr;
+ struct mutex data_mutex;
+ struct sclp_sd_data data;
+ u8 di;
+};
+#define to_sd_file(x) container_of(x, struct sclp_sd_file, kobj)
+
+static struct kset *sclp_sd_kset;
+static struct sclp_sd_file *config_file;
+
+static LIST_HEAD(sclp_sd_queue);
+static DEFINE_SPINLOCK(sclp_sd_queue_lock);
+
+/**
+ * sclp_sd_listener_add() - Add listener for Store Data responses
+ * @listener: Listener to add
+ */
+static void sclp_sd_listener_add(struct sclp_sd_listener *listener)
+{
+ spin_lock_irq(&sclp_sd_queue_lock);
+ list_add_tail(&listener->list, &sclp_sd_queue);
+ spin_unlock_irq(&sclp_sd_queue_lock);
+}
+
+/**
+ * sclp_sd_listener_remove() - Remove listener for Store Data responses
+ * @listener: Listener to remove
+ */
+static void sclp_sd_listener_remove(struct sclp_sd_listener *listener)
+{
+ spin_lock_irq(&sclp_sd_queue_lock);
+ list_del(&listener->list);
+ spin_unlock_irq(&sclp_sd_queue_lock);
+}
+
+/**
+ * sclp_sd_listener_init() - Initialize a Store Data response listener
+ * @id: Event ID to listen for
+ *
+ * Initialize a listener for asynchronous Store Data responses. This listener
+ * can afterwards be used to wait for a specific response and to retrieve
+ * the associated response data.
+ */
+static void sclp_sd_listener_init(struct sclp_sd_listener *listener, u32 id)
+{
+ memset(listener, 0, sizeof(*listener));
+ listener->id = id;
+ init_completion(&listener->completion);
+}
+
+/**
+ * sclp_sd_receiver() - Receiver for Store Data events
+ * @evbuf_hdr: Header of received events
+ *
+ * Process Store Data events and complete listeners with matching event IDs.
+ */
+static void sclp_sd_receiver(struct evbuf_header *evbuf_hdr)
+{
+ struct sclp_sd_evbuf *evbuf = (struct sclp_sd_evbuf *) evbuf_hdr;
+ struct sclp_sd_listener *listener;
+ int found = 0;
+
+ pr_debug("received event (id=0x%08x)\n", evbuf->id);
+ spin_lock(&sclp_sd_queue_lock);
+ list_for_each_entry(listener, &sclp_sd_queue, list) {
+ if (listener->id != evbuf->id)
+ continue;
+
+ listener->evbuf = *evbuf;
+ complete(&listener->completion);
+ found = 1;
+ break;
+ }
+ spin_unlock(&sclp_sd_queue_lock);
+
+ if (!found)
+ pr_debug("unsolicited event (id=0x%08x)\n", evbuf->id);
+}
+
+static struct sclp_register sclp_sd_register = {
+ .send_mask = EVTYP_STORE_DATA_MASK,
+ .receive_mask = EVTYP_STORE_DATA_MASK,
+ .receiver_fn = sclp_sd_receiver,
+};
+
+/**
+ * sclp_sd_sync() - Perform Store Data request synchronously
+ * @page: Address of work page - must be below 2GB
+ * @eq: Input EQ value
+ * @di: Input DI value
+ * @sat: Input SAT value
+ * @sa: Input SA value used to specify the address of the target buffer
+ * @dsize_ptr: Optional pointer to input and output DSIZE value
+ * @esize_ptr: Optional pointer to output ESIZE value
+ *
+ * Perform Store Data request with specified parameters and wait for completion.
+ *
+ * Return %0 on success and store resulting DSIZE and ESIZE values in
+ * @dsize_ptr and @esize_ptr (if provided). Return non-zero on error.
+ */
+static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa,
+ u32 *dsize_ptr, u32 *esize_ptr)
+{
+ struct sclp_sd_sccb *sccb = (void *) page;
+ struct sclp_sd_listener listener;
+ struct sclp_sd_evbuf *evbuf;
+ int rc;
+
+ sclp_sd_listener_init(&listener, (u32) (addr_t) sccb);
+ sclp_sd_listener_add(&listener);
+
+ /* Prepare SCCB */
+ memset(sccb, 0, PAGE_SIZE);
+ sccb->hdr.length = sizeof(sccb->hdr) + sizeof(sccb->evbuf);
+ evbuf = &sccb->evbuf;
+ evbuf->hdr.length = sizeof(*evbuf);
+ evbuf->hdr.type = EVTYP_STORE_DATA;
+ evbuf->eq = eq;
+ evbuf->di = di;
+ evbuf->id = listener.id;
+ evbuf->fmt = 1;
+ evbuf->sat = sat;
+ evbuf->sa = sa;
+ if (dsize_ptr)
+ evbuf->dsize = *dsize_ptr;
+
+ /* Perform command */
+ pr_debug("request (eq=%d, di=%d, id=0x%08x)\n", eq, di, listener.id);
+ rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+ pr_debug("request done (rc=%d)\n", rc);
+ if (rc)
+ goto out;
+
+ /* Evaluate response */
+ if (sccb->hdr.response_code == 0x73f0) {
+ pr_debug("event not supported\n");
+ rc = -EIO;
+ goto out_remove;
+ }
+ if (sccb->hdr.response_code != 0x0020 || !(evbuf->hdr.flags & 0x80)) {
+ rc = -EIO;
+ goto out;
+ }
+ if (!(evbuf->rflags & 0x80)) {
+ rc = wait_for_completion_interruptible(&listener.completion);
+ if (rc)
+ goto out;
+ evbuf = &listener.evbuf;
+ }
+ switch (evbuf->status) {
+ case 0:
+ if (dsize_ptr)
+ *dsize_ptr = evbuf->dsize;
+ if (esize_ptr)
+ *esize_ptr = evbuf->esize;
+ pr_debug("success (dsize=%u, esize=%u)\n", evbuf->dsize,
+ evbuf->esize);
+ break;
+ case 3:
+ rc = -ENOENT;
+ break;
+ default:
+ rc = -EIO;
+ break;
+
+ }
+
+out:
+ if (rc && rc != -ENOENT) {
+ /* Provide some information about what went wrong */
+ pr_warn("Store Data request failed (eq=%d, di=%d, "
+ "response=0x%04x, flags=0x%02x, status=%d, rc=%d)\n",
+ eq, di, sccb->hdr.response_code, evbuf->hdr.flags,
+ evbuf->status, rc);
+ }
+
+out_remove:
+ sclp_sd_listener_remove(&listener);
+
+ return rc;
+}
+
+/**
+ * sclp_sd_store_data() - Obtain data for specified Store Data entity
+ * @result: Resulting data
+ * @di: DI value associated with this entity
+ *
+ * Perform a series of Store Data requests to obtain the size and contents of
+ * the specified Store Data entity.
+ *
+ * Return:
+ * %0: Success - result is stored in @result. @result->data must be
+ * released using vfree() after use.
+ * %-ENOENT: No data available for this entity
+ * %<0: Other error
+ */
+static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
+{
+ u32 dsize = 0, esize = 0;
+ unsigned long page, asce = 0;
+ void *data = NULL;
+ int rc;
+
+ page = __get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
+ return -ENOMEM;
+
+ /* Get size */
+ rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize);
+ if (rc)
+ goto out;
+ if (dsize == 0)
+ goto out_result;
+
+ /* Allocate memory */
+ data = vzalloc((size_t) dsize * PAGE_SIZE);
+ if (!data) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Get translation table for buffer */
+ asce = base_asce_alloc((unsigned long) data, dsize);
+ if (!asce) {
+ vfree(data);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Get data */
+ rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize,
+ &esize);
+ if (rc) {
+ /* Cancel running request if interrupted */
+ if (rc == -ERESTARTSYS)
+ sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
+ vfree(data);
+ goto out;
+ }
+
+out_result:
+ result->esize_bytes = (size_t) esize * PAGE_SIZE;
+ result->dsize_bytes = (size_t) dsize * PAGE_SIZE;
+ result->data = data;
+
+out:
+ base_asce_free(asce);
+ free_page(page);
+
+ return rc;
+}
+
+/**
+ * sclp_sd_data_reset() - Reset Store Data result buffer
+ * @data: Data buffer to reset
+ *
+ * Reset @data to initial state and release associated memory.
+ */
+static void sclp_sd_data_reset(struct sclp_sd_data *data)
+{
+ vfree(data->data);
+ data->data = NULL;
+ data->dsize_bytes = 0;
+ data->esize_bytes = 0;
+}
+
+/**
+ * sclp_sd_file_release() - Release function for sclp_sd_file object
+ * @kobj: Kobject embedded in sclp_sd_file object
+ */
+static void sclp_sd_file_release(struct kobject *kobj)
+{
+ struct sclp_sd_file *sd_file = to_sd_file(kobj);
+
+ sclp_sd_data_reset(&sd_file->data);
+ kfree(sd_file);
+}
+
+/**
+ * sclp_sd_file_update() - Update contents of sclp_sd_file object
+ * @sd_file: Object to update
+ *
+ * Obtain the current version of data associated with the Store Data entity
+ * @sd_file.
+ *
+ * On success, return %0 and generate a KOBJ_CHANGE event to indicate that the
+ * data may have changed. Return non-zero otherwise.
+ */
+static int sclp_sd_file_update(struct sclp_sd_file *sd_file)
+{
+ const char *name = kobject_name(&sd_file->kobj);
+ struct sclp_sd_data data;
+ int rc;
+
+ rc = sclp_sd_store_data(&data, sd_file->di);
+ if (rc) {
+ if (rc == -ENOENT) {
+ pr_info("No data is available for the %s data entity\n",
+ name);
+ }
+ return rc;
+ }
+
+ mutex_lock(&sd_file->data_mutex);
+ sclp_sd_data_reset(&sd_file->data);
+ sd_file->data = data;
+ mutex_unlock(&sd_file->data_mutex);
+
+ pr_info("A %zu-byte %s data entity was retrieved\n", data.dsize_bytes,
+ name);
+ kobject_uevent(&sd_file->kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * sclp_sd_file_update_async() - Wrapper for asynchronous update call
+ * @data: Object to update
+ */
+static void sclp_sd_file_update_async(void *data, async_cookie_t cookie)
+{
+ struct sclp_sd_file *sd_file = data;
+
+ sclp_sd_file_update(sd_file);
+}
+
+/**
+ * reload_store() - Store function for "reload" sysfs attribute
+ * @kobj: Kobject of sclp_sd_file object
+ *
+ * Initiate a reload of the data associated with an sclp_sd_file object.
+ */
+static ssize_t reload_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sclp_sd_file *sd_file = to_sd_file(kobj);
+
+ sclp_sd_file_update(sd_file);
+
+ return count;
+}
+
+static struct kobj_attribute reload_attr = __ATTR_WO(reload);
+
+static struct attribute *sclp_sd_file_default_attrs[] = {
+ &reload_attr.attr,
+ NULL,
+};
+
+static struct kobj_type sclp_sd_file_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = sclp_sd_file_release,
+ .default_attrs = sclp_sd_file_default_attrs,
+};
+
+/**
+ * data_read() - Read function for "read" sysfs attribute
+ * @kobj: Kobject of sclp_sd_file object
+ * @buffer: Target buffer
+ * @off: Requested file offset
+ * @size: Requested number of bytes
+ *
+ * Store the requested portion of the Store Data entity contents into the
+ * specified buffer. Return the number of bytes stored on success, or %0
+ * on EOF.
+ */
+static ssize_t data_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buffer,
+ loff_t off, size_t size)
+{
+ struct sclp_sd_file *sd_file = to_sd_file(kobj);
+ size_t data_size;
+ char *data;
+
+ mutex_lock(&sd_file->data_mutex);
+
+ data = sd_file->data.data;
+ data_size = sd_file->data.dsize_bytes;
+ if (!data || off >= data_size) {
+ size = 0;
+ } else {
+ if (off + size > data_size)
+ size = data_size - off;
+ memcpy(buffer, data + off, size);
+ }
+
+ mutex_unlock(&sd_file->data_mutex);
+
+ return size;
+}
+
+/**
+ * sclp_sd_file_create() - Add a sysfs file representing a Store Data entity
+ * @name: Name of file
+ * @di: DI value associated with this entity
+ *
+ * Create a sysfs directory with the given @name located under
+ *
+ * /sys/firmware/sclp_sd/
+ *
+ * The files in this directory can be used to access the contents of the Store
+ * Data entity associated with @DI.
+ *
+ * Return pointer to resulting sclp_sd_file object on success, %NULL otherwise.
+ * The object must be freed by calling kobject_put() on the embedded kobject
+ * pointer after use.
+ */
+static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
+{
+ struct sclp_sd_file *sd_file;
+ int rc;
+
+ sd_file = kzalloc(sizeof(*sd_file), GFP_KERNEL);
+ if (!sd_file)
+ return NULL;
+ sd_file->di = di;
+ mutex_init(&sd_file->data_mutex);
+
+ /* Create kobject located under /sys/firmware/sclp_sd/ */
+ sd_file->kobj.kset = sclp_sd_kset;
+ rc = kobject_init_and_add(&sd_file->kobj, &sclp_sd_file_ktype, NULL,
+ "%s", name);
+ if (rc) {
+ kobject_put(&sd_file->kobj);
+ return NULL;
+ }
+
+ sysfs_bin_attr_init(&sd_file->data_attr);
+ sd_file->data_attr.attr.name = "data";
+ sd_file->data_attr.attr.mode = 0444;
+ sd_file->data_attr.read = data_read;
+
+ rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
+ if (rc) {
+ kobject_put(&sd_file->kobj);
+ return NULL;
+ }
+
+ /*
+ * For completeness only - users interested in entity data should listen
+ * for KOBJ_CHANGE instead.
+ */
+ kobject_uevent(&sd_file->kobj, KOBJ_ADD);
+
+ /* Don't let a slow Store Data request delay further initialization */
+ async_schedule(sclp_sd_file_update_async, sd_file);
+
+ return sd_file;
+}
+
+/**
+ * sclp_sd_init() - Initialize sclp_sd support and register sysfs files
+ */
+static __init int sclp_sd_init(void)
+{
+ int rc;
+
+ rc = sclp_register(&sclp_sd_register);
+ if (rc)
+ return rc;
+
+ /* Create kset named "sclp_sd" located under /sys/firmware/ */
+ rc = -ENOMEM;
+ sclp_sd_kset = kset_create_and_add("sclp_sd", NULL, firmware_kobj);
+ if (!sclp_sd_kset)
+ goto err_kset;
+
+ rc = -EINVAL;
+ config_file = sclp_sd_file_create("config", SD_DI_CONFIG);
+ if (!config_file)
+ goto err_config;
+
+ return 0;
+
+err_config:
+ kset_unregister(sclp_sd_kset);
+err_kset:
+ sclp_unregister(&sclp_sd_register);
+
+ return rc;
+}
+device_initcall(sclp_sd_init);
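
The interface created by sclp_sd.c ends up under /sys/firmware/sclp_sd/, with a write-only "reload" attribute and a binary "data" attribute per entity. A minimal userspace sketch of reading the "config" entity (error handling trimmed, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/firmware/sclp_sd/config/data", "rb");
            char buf[4096];
            size_t n;

            if (!f)
                    return 1;
            while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                    fwrite(buf, 1, n, stdout);
            fclose(f);
            return 0;
    }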
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 9f7b87d6d434..5aff8b684eb2 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -502,7 +502,10 @@ sclp_tty_init(void)
int i;
int rc;
- if (!CONSOLE_IS_SCLP)
+ /* z/VM multiplexes the line mode output on the 32xx screen */
+ if (MACHINE_IS_VM && !CONSOLE_IS_SCLP)
+ return 0;
+ if (!sclp.has_linemode)
return 0;
driver = alloc_tty_driver(1);
if (!driver)
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index faae30476f4b..32a14ee31c6b 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -105,29 +105,14 @@ static const struct seq_operations tape_proc_seq = {
.show = tape_proc_show,
};
-static int tape_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &tape_proc_seq);
-}
-
-static const struct file_operations tape_proc_ops =
-{
- .owner = THIS_MODULE,
- .open = tape_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
/*
* Initialize procfs stuff on startup
*/
void
tape_proc_init(void)
{
- tape_proc_devices =
- proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
- &tape_proc_ops);
+ tape_proc_devices = proc_create_seq("tapedevices",
+ S_IFREG | S_IRUGO | S_IWUSR, NULL, &tape_proc_seq);
if (tape_proc_devices == NULL) {
return;
}
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 17e411c57576..948ce82a7725 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -23,7 +23,6 @@
#include <linux/mutex.h>
#include <linux/cma.h>
#include <linux/mm.h>
-#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/vmcp.h>
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index bfec1485ca23..93b2862bd3fa 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -323,8 +323,10 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
struct ccw_dev_id dev_id;
int rc, i;
- gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
- GFP_KERNEL);
+ if (num_devices < 1)
+ return -EINVAL;
+
+ gdev = kzalloc(struct_size(gdev, cdev, num_devices), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
@@ -375,7 +377,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
goto error;
}
/* Check if the devices are bound to the required ccw driver. */
- if (gdev->count && gdrv && gdrv->ccw_driver &&
+ if (gdrv && gdrv->ccw_driver &&
gdev->cdev[0]->drv != gdrv->ccw_driver) {
rc = -EINVAL;
goto error;
@@ -558,6 +560,12 @@ static struct bus_type ccwgroup_bus_type = {
.pm = &ccwgroup_pm_ops,
};
+bool dev_is_ccwgroup(struct device *dev)
+{
+ return dev->bus == &ccwgroup_bus_type;
+}
+EXPORT_SYMBOL(dev_is_ccwgroup);
+
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
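
The allocation in ccwgroup_create_dev() above now uses the struct_size() helper from <linux/overflow.h>, which computes the size of a structure with a trailing flexible array and saturates on overflow instead of wrapping. A rough sketch of the same idiom with a hypothetical structure:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct foo_group {
            unsigned int count;
            void *items[];          /* flexible array, like gdev->cdev[] */
    };

    static struct foo_group *foo_group_alloc(unsigned int n)
    {
            struct foo_group *g;

            /* sizeof(*g) + n * sizeof(g->items[0]), overflow-checked */
            g = kzalloc(struct_size(g, items, n), GFP_KERNEL);
            if (g)
                    g->count = n;
            return g;
    }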
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index f95b452b8bbc..afbdee74147d 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -384,6 +384,28 @@ static ssize_t chp_chid_external_show(struct device *dev,
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
+ sizeof(chp->desc_fmt3.util_str));
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static BIN_ATTR_RO(util_string,
+ sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
+
+static struct bin_attribute *chp_bin_attrs[] = {
+ &bin_attr_util_string,
+ NULL,
+};
+
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
@@ -396,6 +418,7 @@ static struct attribute *chp_attrs[] = {
};
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
+ .bin_attrs = chp_bin_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
&chp_attr_group,
@@ -422,7 +445,7 @@ int chp_update_desc(struct channel_path *chp)
{
int rc;
- rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+ rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
if (rc)
return rc;
@@ -431,6 +454,7 @@ int chp_update_desc(struct channel_path *chp)
* hypervisors implement the required chsc commands.
*/
chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
chsc_get_channel_measurement_chars(chp);
return 0;
@@ -506,20 +530,20 @@ out:
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel-path ID. Return %NULL on error.
*/
-struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
{
struct channel_path *chp;
- struct channel_path_desc *desc;
+ struct channel_path_desc_fmt0 *desc;
chp = chpid_to_chp(chpid);
if (!chp)
return NULL;
- desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+ desc = kmalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
mutex_lock(&chp->lock);
- memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+ memcpy(desc, &chp->desc, sizeof(*desc));
mutex_unlock(&chp->lock);
return desc;
}
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 7e80323cd261..20259f3fbf45 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -44,8 +44,9 @@ struct channel_path {
struct chp_id chpid;
struct mutex lock; /* Serialize access to below members. */
int state;
- struct channel_path_desc desc;
+ struct channel_path_desc_fmt0 desc;
struct channel_path_desc_fmt1 desc_fmt1;
+ struct channel_path_desc_fmt3 desc_fmt3;
/* Channel-measurement related stuff: */
int cmg;
int shared;
@@ -61,7 +62,7 @@ static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
-struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
int chp_update_desc(struct channel_path *chp);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c08fc5a8df0c..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
@@ -915,6 +923,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
+ if ((rfmt == 3) && !css_general_characteristics.util_str)
+ return -EINVAL;
memset(page, 0, PAGE_SIZE);
scpd_area = page;
@@ -940,43 +950,30 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
-int chsc_determine_base_channel_path_desc(struct chp_id chpid,
- struct channel_path_desc *desc)
-{
- struct chsc_scpd *scpd_area;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&chsc_page_lock, flags);
- scpd_area = chsc_page;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
- if (ret)
- goto out;
-
- memcpy(desc, scpd_area->data, sizeof(*desc));
-out:
- spin_unlock_irqrestore(&chsc_page_lock, flags);
- return ret;
+#define chsc_det_chp_desc(FMT, c) \
+int chsc_determine_fmt##FMT##_channel_path_desc( \
+ struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc) \
+{ \
+ struct chsc_scpd *scpd_area; \
+ unsigned long flags; \
+ int ret; \
+ \
+ spin_lock_irqsave(&chsc_page_lock, flags); \
+ scpd_area = chsc_page; \
+ ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \
+ scpd_area); \
+ if (ret) \
+ goto out; \
+ \
+ memcpy(desc, scpd_area->data, sizeof(*desc)); \
+out: \
+ spin_unlock_irqrestore(&chsc_page_lock, flags); \
+ return ret; \
}
-int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
- struct channel_path_desc_fmt1 *desc)
-{
- struct chsc_scpd *scpd_area;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&chsc_page_lock, flags);
- scpd_area = chsc_page;
- ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area);
- if (ret)
- goto out;
-
- memcpy(desc, scpd_area->data, sizeof(*desc));
-out:
- spin_unlock_irqrestore(&chsc_page_lock, flags);
- return ret;
-}
+chsc_det_chp_desc(0, 0)
+chsc_det_chp_desc(1, 1)
+chsc_det_chp_desc(3, 0)
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
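
For readers tracing the chsc_det_chp_desc() macro above: each invocation generates a complete locked wrapper around chsc_determine_channel_path_desc(). As a readability aid, the fmt3 instance (FMT=3, c=0) expands to roughly the following; this is a sketch of the expansion, not additional code in the patch.

int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt3 *desc)
{
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	/* rfmt=3 requests the descriptor format that carries the utility string */
	ret = chsc_determine_channel_path_desc(chpid, 0, 3, 0, 0, scpd_area);
	if (ret)
		goto out;

	memcpy(desc, scpd_area->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
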
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index dda5953534b7..5c9f0dd33f4e 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -40,6 +40,11 @@ struct channel_path_desc_fmt1 {
u32 zeros[2];
} __attribute__ ((packed));
+struct channel_path_desc_fmt3 {
+ struct channel_path_desc_fmt1 fmt1_desc;
+ u8 util_str[64];
+};
+
struct channel_path;
struct css_chsc_char {
@@ -147,10 +152,12 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
int c, int m, void *page);
-int chsc_determine_base_channel_path_desc(struct chp_id chpid,
- struct channel_path_desc *desc);
+int chsc_determine_fmt0_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt0 *desc);
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
struct channel_path_desc_fmt1 *desc);
+int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt3 *desc);
void chsc_chp_online(struct chp_id chpid);
void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 0015729d917d..8d9f36625ba5 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -16,7 +16,6 @@
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>
-#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6886b3d34cf8..5130d7c67239 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
-#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
@@ -767,262 +766,6 @@ void cio_register_early_subchannels(void)
}
#endif /* CONFIG_CCW_CONSOLE */
-static int
-__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
-{
- int retry, cc;
-
- cc = 0;
- for (retry=0;retry<3;retry++) {
- schib->pmcw.ena = 0;
- cc = msch(schid, schib);
- if (cc)
- return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
- return -ENODEV;
- if (!schib->pmcw.ena)
- return 0;
- }
- return -EBUSY; /* uhm... */
-}
-
-static int
-__clear_io_subchannel_easy(struct subchannel_id schid)
-{
- int retry;
-
- if (csch(schid))
- return -ENODEV;
- for (retry=0;retry<20;retry++) {
- struct tpi_info ti;
-
- if (tpi(&ti)) {
- tsch(ti.schid, this_cpu_ptr(&cio_irb));
- if (schid_equal(&ti.schid, &schid))
- return 0;
- }
- udelay_simple(100);
- }
- return -EBUSY;
-}
-
-static void __clear_chsc_subchannel_easy(void)
-{
- /* It seems we can only wait for a bit here :/ */
- udelay_simple(100);
-}
-
-static int pgm_check_occured;
-
-static void cio_reset_pgm_check_handler(void)
-{
- pgm_check_occured = 1;
-}
-
-static int stsch_reset(struct subchannel_id schid, struct schib *addr)
-{
- int rc;
-
- pgm_check_occured = 0;
- s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
- s390_base_pgm_handler_fn = NULL;
-
- /* The program check handler could have changed pgm_check_occured. */
- barrier();
-
- if (pgm_check_occured)
- return -EIO;
- else
- return rc;
-}
-
-static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_reset(schid, &schib))
- return -ENXIO;
- if (!schib.pmcw.ena)
- return 0;
- switch(__disable_subchannel_easy(schid, &schib)) {
- case 0:
- case -ENODEV:
- break;
- default: /* -EBUSY */
- switch (schib.pmcw.st) {
- case SUBCHANNEL_TYPE_IO:
- if (__clear_io_subchannel_easy(schid))
- goto out; /* give up... */
- break;
- case SUBCHANNEL_TYPE_CHSC:
- __clear_chsc_subchannel_easy();
- break;
- default:
- /* No default clear strategy */
- break;
- }
- stsch(schid, &schib);
- __disable_subchannel_easy(schid, &schib);
- }
-out:
- return 0;
-}
-
-static atomic_t chpid_reset_count;
-
-static void s390_reset_chpids_mcck_handler(void)
-{
- struct crw crw;
- union mci mci;
-
- /* Check for pending channel report word. */
- mci.val = S390_lowcore.mcck_interruption_code;
- if (!mci.cp)
- return;
- /* Process channel report words. */
- while (stcrw(&crw) == 0) {
- /* Check for responses to RCHP. */
- if (crw.slct && crw.rsc == CRW_RSC_CPATH)
- atomic_dec(&chpid_reset_count);
- }
-}
-
-#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
-static void css_reset(void)
-{
- int i, ret;
- unsigned long long timeout;
- struct chp_id chpid;
-
- /* Reset subchannels. */
- for_each_subchannel(__shutdown_subchannel_easy, NULL);
- /* Reset channel paths. */
- s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
- /* Enable channel report machine checks. */
- __ctl_set_bit(14, 28);
- /* Temporarily reenable machine checks. */
- local_mcck_enable();
- chp_id_init(&chpid);
- for (i = 0; i <= __MAX_CHPID; i++) {
- chpid.id = i;
- ret = rchp(chpid);
- if ((ret == 0) || (ret == 2))
- /*
- * rchp either succeeded, or another rchp is already
- * in progress. In either case, we'll get a crw.
- */
- atomic_inc(&chpid_reset_count);
- }
- /* Wait for machine check for all channel paths. */
- timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
- while (atomic_read(&chpid_reset_count) != 0) {
- if (get_tod_clock_fast() > timeout)
- break;
- cpu_relax();
- }
- /* Disable machine checks again. */
- local_mcck_disable();
- /* Disable channel report machine checks. */
- __ctl_clear_bit(14, 28);
- s390_base_mcck_handler_fn = NULL;
-}
-
-static struct reset_call css_reset_call = {
- .fn = css_reset,
-};
-
-static int __init init_css_reset_call(void)
-{
- atomic_set(&chpid_reset_count, 0);
- register_reset_call(&css_reset_call);
- return 0;
-}
-
-arch_initcall(init_css_reset_call);
-
-struct sch_match_id {
- struct subchannel_id schid;
- struct ccw_dev_id devid;
- int rc;
-};
-
-static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- struct sch_match_id *match_id = data;
-
- if (stsch_reset(schid, &schib))
- return -ENXIO;
- if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
- (schib.pmcw.dev == match_id->devid.devno) &&
- (schid.ssid == match_id->devid.ssid)) {
- match_id->schid = schid;
- match_id->rc = 0;
- return 1;
- }
- return 0;
-}
-
-static int reipl_find_schid(struct ccw_dev_id *devid,
- struct subchannel_id *schid)
-{
- struct sch_match_id match_id;
-
- match_id.devid = *devid;
- match_id.rc = -ENODEV;
- for_each_subchannel(__reipl_subchannel_match, &match_id);
- if (match_id.rc == 0)
- *schid = match_id.schid;
- return match_id.rc;
-}
-
-extern void do_reipl_asm(__u32 schid);
-
-/* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void reipl_ccw_dev(struct ccw_dev_id *devid)
-{
- struct subchannel_id uninitialized_var(schid);
-
- s390_reset_system();
- if (reipl_find_schid(devid, &schid) != 0)
- panic("IPL Device not found\n");
- do_reipl_asm(*((__u32*)&schid));
-}
-
-int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
-{
- static struct chsc_sda_area sda_area __initdata;
- struct subchannel_id schid;
- struct schib schib;
-
- schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
- if (!schid.one)
- return -ENODEV;
-
- if (schid.ssid) {
- /*
- * Firmware should have already enabled MSS but whoever started
- * the kernel might have initiated a channel subsystem reset.
- * Ensure that MSS is enabled.
- */
- memset(&sda_area, 0, sizeof(sda_area));
- if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
- return -ENODEV;
- }
- if (stsch(schid, &schib))
- return -ENODEV;
- if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
- return -ENODEV;
- if (!schib.pmcw.dnv)
- return -ENODEV;
-
- iplinfo->ssid = schid.ssid;
- iplinfo->devno = schib.pmcw.dev;
- iplinfo->is_qdio = schib.pmcw.qf;
- return 0;
-}
-
/**
* cio_tm_start_key - perform start function
* @sch: subchannel on which to perform the start function
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f50ea035aa9b..1540229a37bb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1073,8 +1073,7 @@ out_schedule:
return 0;
}
-static int
-io_subchannel_remove (struct subchannel *sch)
+static int io_subchannel_remove(struct subchannel *sch)
{
struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
@@ -1082,14 +1081,12 @@ io_subchannel_remove (struct subchannel *sch)
cdev = sch_get_cdev(sch);
if (!cdev)
goto out_free;
- io_subchannel_quiesce(sch);
- /* Set ccw device to not operational and drop reference. */
- spin_lock_irq(cdev->ccwlock);
+
+ ccw_device_unregister(cdev);
+ spin_lock_irq(sch->lock);
sch_set_cdev(sch, NULL);
set_io_private(sch, NULL);
- cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irq(cdev->ccwlock);
- ccw_device_unregister(cdev);
+ spin_unlock_irq(sch->lock);
out_free:
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
@@ -1721,6 +1718,7 @@ static int ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
+ struct subchannel *sch;
int ret;
if (cdrv->remove)
@@ -1746,7 +1744,9 @@ static int ccw_device_remove(struct device *dev)
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
+ sch = to_subchannel(cdev->dev.parent);
spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
__disable_cmf(cdev);
return 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 75ce12a24dc2..4435ae0b3027 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -460,8 +460,8 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel path. Return %NULL on error.
*/
-struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
- int chp_idx)
+struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
+ int chp_idx)
{
struct subchannel *sch;
struct chp_id chpid;
@@ -473,6 +473,36 @@ struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
}
/**
+ * ccw_device_get_util_str() - return newly allocated utility strings
+ * @cdev: device to obtain the utility strings for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the utility strings
+ * associated with the given channel path. Return %NULL on error.
+ */
+u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *util_str;
+
+ chp_id_init(&chpid);
+ chpid.id = sch->schib.pmcw.chpid[chp_idx];
+ chp = chpid_to_chp(chpid);
+
+ util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
+ if (!util_str)
+ return NULL;
+
+ mutex_lock(&chp->lock);
+ memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
+ mutex_unlock(&chp->lock);
+
+ return util_str;
+}
+
+/**
* ccw_device_get_id() - obtain a ccw device id
* @cdev: device to obtain the id for
* @dev_id: where to fill in the values
@@ -682,3 +712,4 @@ EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
+EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
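
Since ccw_device_get_util_str() returns a kmalloc'd 64-byte copy, the caller owns the buffer and must kfree() it. A hedged in-kernel sketch of a consumer, assuming the usual <linux/slab.h> and <linux/printk.h> includes in the driver; the helper name show_util_str() is illustrative only.

static void show_util_str(struct ccw_device *cdev, int chp_idx)
{
	u8 *util_str = ccw_device_get_util_str(cdev, chp_idx);

	if (!util_str)
		return;
	/* raw fmt3 data, 64 bytes, not NUL terminated */
	print_hex_dump(KERN_DEBUG, "util_str: ", DUMP_PREFIX_OFFSET,
		       16, 1, util_str, 64, false);
	kfree(util_str);
}
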
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 4fa9ee1d09fa..14d328338ce2 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -183,30 +183,6 @@ int chsc(void *chsc_area)
}
EXPORT_SYMBOL(chsc);
-static inline int __rchp(struct chp_id chpid)
-{
- register struct chp_id reg1 asm ("1") = chpid;
- int ccode;
-
- asm volatile(
- " lr 1,%1\n"
- " rchp\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
- return ccode;
-}
-
-int rchp(struct chp_id chpid)
-{
- int ccode;
-
- ccode = __rchp(chpid);
- trace_s390_cio_rchp(chpid, ccode);
-
- return ccode;
-}
-
static inline int __rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 35ad4ddd61e0..4be539cb9adc 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -20,7 +20,6 @@ int ssch(struct subchannel_id schid, union orb *addr);
int csch(struct subchannel_id schid);
int tpi(struct tpi_info *addr);
int chsc(void *chsc_area);
-int rchp(struct chp_id chpid);
int rsch(struct subchannel_id schid);
int hsch(struct subchannel_id schid);
int xsch(struct subchannel_id schid);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d5b02de02a3a..f4ca72dd862f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -98,22 +98,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
return cc;
}
-static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
-{
- /* all done or next buffer state different */
- if (ccq == 0 || ccq == 32)
- return 0;
- /* no buffer processed */
- if (ccq == 97)
- return 1;
- /* not all buffers processed */
- if (ccq == 96)
- return 2;
- /* notify devices immediately */
- DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
- return -EIO;
-}
-
/**
* qdio_do_eqbs - extract buffer states for QEBSM
* @q: queue to manipulate
@@ -128,7 +112,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
- int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+ int tmp_count = count, tmp_start = start, nr = q->nr;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
@@ -138,34 +122,30 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
- rc = qdio_check_ccq(q, ccq);
- if (!rc)
- return count - tmp_count;
- if (rc == 1) {
- DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
- goto again;
- }
-
- if (rc == 2) {
+ switch (ccq) {
+ case 0:
+ case 32:
+ /* all done, or next buffer state different */
+ return count - tmp_count;
+ case 96:
+ /* not all buffers processed */
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
- /*
- * Retry once, if that fails bail out and process the
- * extracted buffers before trying again.
- */
- if (!retried++)
- goto again;
- else
- return count - tmp_count;
+ return count - tmp_count;
+ case 97:
+ /* no buffer processed */
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
+ goto again;
+ default:
+ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
+ q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
}
-
- DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
- q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
- return 0;
}
/**
@@ -185,7 +165,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
unsigned int ccq = 0;
int tmp_count = count, tmp_start = start;
int nr = q->nr;
- int rc;
if (!count)
return 0;
@@ -195,26 +174,32 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
- rc = qdio_check_ccq(q, ccq);
- if (!rc) {
+
+ switch (ccq) {
+ case 0:
+ case 32:
+ /* all done, or active buffer adapter-owned */
WARN_ON_ONCE(tmp_count);
return count - tmp_count;
- }
-
- if (rc == 1 || rc == 2) {
+ case 96:
+ /* not all buffers processed */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
qperf_inc(q, sqbs_partial);
goto again;
+ default:
+ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
+ q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
}
-
- DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
- q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
- return 0;
}
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack, int merge_pending)
@@ -225,17 +210,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
- for (i = 0; i < count; i++) {
- if (!__state) {
- __state = q->slsb.val[bufnr];
- if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
- __state = SLSB_P_OUTPUT_EMPTY;
- } else if (merge_pending) {
- if ((q->slsb.val[bufnr] & __state) != __state)
- break;
- } else if (q->slsb.val[bufnr] != __state)
- break;
+ /* get initial state: */
+ __state = q->slsb.val[bufnr];
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+
+ for (i = 1; i < count; i++) {
bufnr = next_buf(bufnr);
+
+ /* merge PENDING into EMPTY: */
+ if (merge_pending &&
+ q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+ __state == SLSB_P_OUTPUT_EMPTY)
+ continue;
+
+ /* stop if next state differs from initial state: */
+ if (q->slsb.val[bufnr] != __state)
+ break;
}
*state = __state;
return i;
@@ -502,8 +493,8 @@ static inline void inbound_primed(struct qdio_q *q, int count)
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
- int count, stop;
unsigned char state = 0;
+ int count;
q->timestamp = get_tod_clock_fast();
@@ -512,9 +503,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
* would return 0.
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
- stop = add_buf(q->first_to_check, count);
-
- if (q->first_to_check == stop)
+ if (!count)
goto out;
/*
@@ -734,8 +723,8 @@ void qdio_inbound_processing(unsigned long data)
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
- int count, stop;
unsigned char state = 0;
+ int count;
q->timestamp = get_tod_clock_fast();
@@ -751,11 +740,11 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
* would return 0.
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
- stop = add_buf(q->first_to_check, count);
- if (q->first_to_check == stop)
+ if (!count)
goto out;
- count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
+ count = get_buf_states(q, q->first_to_check, &state, count, 0,
+ q->u.out.use_cq);
if (!count)
goto out;
@@ -1218,8 +1207,10 @@ no_cleanup:
qdio_shutdown_thinint(irq_ptr);
/* restore interrupt handler */
- if ((void *)cdev->handler == (void *)qdio_int_handler)
+ if ((void *)cdev->handler == (void *)qdio_int_handler) {
cdev->handler = irq_ptr->orig_handler;
+ cdev->private->intparm = 0;
+ }
spin_unlock_irq(get_ccwdev_lock(cdev));
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 98f3cfdc0d02..4c14ce428e92 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
int i;
for (i = 0; i < nr_queues; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
- int rc;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,26 +492,23 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
+ spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
return 0;
-out_err:
- qdio_release_memory(irq_ptr);
- return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 2c7550797ec2..dce92b2a895d 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
* and stores the result to ccwchain list. @cp must have been
* initialized by a previous call with cp_init(). Otherwise, undefined
* behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry ch_len holds the count of CCWs to be translated.
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
+ * This lets cp_free() use ch_len to know how many CCWs to free in each chain.
*
* The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
* as helpers to do ccw chain translation inside the kernel. Basically
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
for (idx = 0; idx < len; idx++) {
ret = ccwchain_fetch_one(chain, idx, cp);
if (ret)
- return ret;
+ goto out_err;
}
}
return 0;
+out_err:
+ /* Only cleanup the chain elements that were actually translated. */
+ chain->ch_len = idx;
+ list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+ chain->ch_len = 0;
+ }
+ return ret;
}
/**
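
The ch_len bookkeeping documented above is what makes a failed prefetch safe to unwind. A minimal caller fragment, modeled on the fsm_io_request() path and assuming the cp_init()/cp_prefetch()/cp_free() API shown in this file:

	ret = cp_init(&private->cp, mdev_dev(mdev), orb);
	if (ret)
		return ret;

	ret = cp_prefetch(&private->cp);
	if (ret) {
		/*
		 * Each chain's ch_len now reflects how many CCWs were
		 * actually translated, so cp_free() releases exactly
		 * those and nothing more.
		 */
		cp_free(&private->cp);
		return ret;
	}
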
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index c30420c517b1..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
int ccode;
__u8 lpm;
unsigned long flags;
+ int ret;
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
private->state = VFIO_CCW_STATE_BUSY;
- spin_unlock_irqrestore(sch->lock, flags);
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
* Initialize device status information
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- return 0;
+ ret = 0;
+ break;
case 1: /* Status pending */
case 2: /* Busy */
- return -EBUSY;
+ ret = -EBUSY;
+ break;
case 3: /* Device/path not operational */
{
lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch->lpm = 0;
if (cio_update_schib(sch))
- return -ENODEV;
-
- return sch->lpm ? -EACCES : -ENODEV;
+ ret = -ENODEV;
+ else
+ ret = sch->lpm ? -EACCES : -ENODEV;
+ break;
}
default:
- return ccode;
+ ret = ccode;
}
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
}
static void fsm_notoper(struct vfio_ccw_private *private,
@@ -124,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
orb = (union orb *)io_region->orb_area;
+ /* Don't try to build a cp if transport mode is specified. */
+ if (orb->tm.b) {
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ }
io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
orb);
if (io_region->ret_code)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48d55dc9e986..35a0c2b52f82 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -25,7 +25,6 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
-#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
@@ -1197,26 +1196,7 @@ static void ap_config_timeout(struct timer_list *unused)
queue_work(system_long_wq, &ap_scan_work);
}
-static void ap_reset_all(void)
-{
- int i, j;
-
- for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i))
- continue;
- for (j = 0; j < AP_DEVICES; j++) {
- if (!ap_test_config_card_id(j))
- continue;
- ap_rapq(AP_MKQID(j, i));
- }
- }
-}
-
-static struct reset_call ap_reset_call = {
- .fn = ap_reset_all,
-};
-
-int __init ap_debug_init(void)
+static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
@@ -1226,17 +1206,12 @@ int __init ap_debug_init(void)
return 0;
}
-void ap_debug_exit(void)
-{
- debug_unregister(ap_dbf_info);
-}
-
/**
* ap_module_init(): The module initialization code.
*
* Initializes the module.
*/
-int __init ap_module_init(void)
+static int __init ap_module_init(void)
{
int max_domain_id;
int rc, i;
@@ -1274,8 +1249,6 @@ int __init ap_module_init(void)
ap_airq_flag = (rc == 0);
}
- register_reset_call(&ap_reset_call);
-
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
@@ -1331,7 +1304,6 @@ out_bus:
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
out:
- unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
kfree(ap_configuration);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index e0827eaa42f1..6a273c5ebca5 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <asm/ap.h>
-#define AP_DEVICES 64 /* Number of AP devices. */
+#define AP_DEVICES 256 /* Number of AP devices. */
#define AP_DOMAINS 256 /* Number of AP domains. */
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
@@ -198,11 +198,18 @@ struct ap_message {
*/
static inline void ap_init_message(struct ap_message *ap_msg)
{
- ap_msg->psmid = 0;
- ap_msg->length = 0;
- ap_msg->rc = 0;
- ap_msg->special = 0;
- ap_msg->receive = NULL;
+ memset(ap_msg, 0, sizeof(*ap_msg));
+}
+
+/**
+ * ap_release_message() - Release ap_message.
+ * Releases all memory used internal within the ap_message struct
+ * Currently this is the message and private field.
+ */
+static inline void ap_release_message(struct ap_message *ap_msg)
+{
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
}
#define for_each_ap_card(_ac) \
@@ -240,7 +247,4 @@ void ap_queue_resume(struct ap_device *ap_dev);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
-int ap_module_init(void);
-void ap_module_exit(void);
-
#endif /* _AP_BUS_H_ */
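
With ap_init_message() zeroing the whole structure and ap_release_message() freeing whatever the request constructors attached, callers can funnel every exit through a single cleanup path. A sketch of the intended pairing, mirroring the zcrypt_send_cprb() changes later in this diff:

	struct ap_message ap_msg;
	int rc;

	ap_init_message(&ap_msg);	/* everything zeroed, nothing to free yet */
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	/* ... pick a queue and send the message ... */

out:
	ap_release_message(&ap_msg);	/* kzfree()s message and private, even on error */
	return rc;
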
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index 6a9d77c75ec3..dc675eb5aef6 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -23,7 +23,4 @@
extern debug_info_t *ap_dbf_info;
-int ap_debug_init(void);
-void ap_debug_exit(void);
-
#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index e7c2e4f9529a..ed80d00cdb6f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -889,7 +889,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain, int verify)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- struct zcrypt_device_matrix *device_matrix;
+ struct zcrypt_device_status_ext *device_status;
u16 card, dom;
u64 mkvp[2];
int i, rc, oi = -1;
@@ -899,18 +899,19 @@ int pkey_findcard(const struct pkey_seckey *seckey,
return -EINVAL;
/* fetch status of all crypto cards */
- device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
+ device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
- if (!device_matrix)
+ if (!device_status)
return -ENOMEM;
- zcrypt_device_status_mask(device_matrix);
+ zcrypt_device_status_mask_ext(device_status);
/* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
- card = AP_QID_CARD(device_matrix->device[i].qid);
- dom = AP_QID_QUEUE(device_matrix->device[i].qid);
- if (device_matrix->device[i].online &&
- device_matrix->device[i].functions & 0x04) {
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ if (device_status[i].online &&
+ device_status[i].functions & 0x04) {
/* an enabled CCA Coprocessor card */
/* try cached mkvp */
if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
@@ -930,14 +931,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
mkvp_cache_scrub(card, dom);
}
}
- if (i >= MAX_ZDEV_ENTRIES) {
+ if (i >= MAX_ZDEV_ENTRIES_EXT) {
/* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
- if (!(device_matrix->device[i].online &&
- device_matrix->device[i].functions & 0x04))
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ if (!(device_status[i].online &&
+ device_status[i].functions & 0x04))
continue;
- card = AP_QID_CARD(device_matrix->device[i].qid);
- dom = AP_QID_QUEUE(device_matrix->device[i].qid);
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
/* fresh fetch mkvp from adapter */
if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
@@ -947,13 +948,13 @@ int pkey_findcard(const struct pkey_seckey *seckey,
oi = i;
}
}
- if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+ if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
/* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_matrix->device[oi].qid);
- dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+ card = AP_QID_CARD(device_status[oi].qid);
+ dom = AP_QID_QUEUE(device_status[oi].qid);
}
}
- if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
+ if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
@@ -962,7 +963,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
} else
rc = -ENODEV;
- kfree(device_matrix);
+ kfree(device_status);
return rc;
}
EXPORT_SYMBOL(pkey_findcard);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ce15f101ee28..febcdb5135bf 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -18,8 +18,6 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
@@ -373,6 +371,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+ ap_init_message(&ap_msg);
rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -427,6 +426,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
spin_unlock(&zcrypt_list_lock);
out:
+ ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(xcRB, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
@@ -470,6 +470,8 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
+ ap_init_message(&ap_msg);
+
target_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
@@ -487,7 +489,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
if (copy_from_user(targets, uptr,
target_num * sizeof(*targets))) {
rc = -EFAULT;
- goto out;
+ goto out_free;
}
}
@@ -544,6 +546,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
out_free:
kfree(targets);
out:
+ ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
@@ -561,6 +564,7 @@ static long zcrypt_rng(char *buffer)
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
+ ap_init_message(&ap_msg);
rc = get_rng_fc(&ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -591,8 +595,10 @@ static long zcrypt_rng(char *buffer)
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
- if (!pref_zq)
- return -ENODEV;
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
@@ -602,24 +608,30 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
out:
+ ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(buffer, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status));
- memset(matrix, 0, sizeof(*matrix));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- stat = matrix->device;
- stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
- stat += AP_QID_QUEUE(zq->queue->qid);
+ card = AP_QID_CARD(zq->queue->qid);
+ if (card >= MAX_ZDEV_CARDIDS)
+ continue;
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->functions >> 26;
stat->qid = zq->queue->qid;
@@ -628,40 +640,70 @@ void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
}
spin_unlock(&zcrypt_list_lock);
}
-EXPORT_SYMBOL(zcrypt_device_status_mask);
-static void zcrypt_status_mask(char status[AP_DEVICES])
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ struct zcrypt_device_status_ext *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext));
- memset(status, 0, sizeof(char) * AP_DEVICES);
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
+static void zcrypt_status_mask(char status[], size_t max_adapters)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
+
+ memset(status, 0, max_adapters);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
- status[AP_QID_CARD(zq->queue->qid)] =
- zc->online ? zc->user_space_type : 0x0d;
+ status[card] = zc->online ? zc->user_space_type : 0x0d;
}
}
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
+static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ int card;
- memset(qdepth, 0, sizeof(char) * AP_DEVICES);
+ memset(qdepth, 0, max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- qdepth[AP_QID_CARD(zq->queue->qid)] =
+ qdepth[card] =
zq->queue->pendingq_count +
zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
@@ -671,21 +713,23 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
+static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ int card;
- memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
+ memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[AP_QID_CARD(zq->queue->qid)] =
- zq->queue->total_request_count;
+ reqcnt[card] = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
}
}
@@ -739,60 +783,10 @@ static int zcrypt_requestq_count(void)
return requestq_count;
}
-static int zcrypt_count_type(int type)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
- int device_count;
-
- device_count = 0;
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- if (zc->card->id != type)
- continue;
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- device_count++;
- }
- }
- spin_unlock(&zcrypt_list_lock);
- return device_count;
-}
-
-/**
- * zcrypt_ica_status(): Old, depracted combi status call.
- *
- * Old, deprecated combi status call.
- */
-static long zcrypt_ica_status(struct file *filp, unsigned long arg)
-{
- struct ica_z90_status *pstat;
- int ret;
-
- pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
- if (!pstat)
- return -ENOMEM;
- pstat->totalcount = zcrypt_device_count;
- pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
- pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
- pstat->requestqWaitCount = zcrypt_requestq_count();
- pstat->pendingqWaitCount = zcrypt_pendingq_count();
- pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
- pstat->cryptoDomain = ap_domain_index;
- zcrypt_status_mask(pstat->status);
- zcrypt_qdepth_mask(pstat->qdepth);
- ret = 0;
- if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
- ret = -EFAULT;
- kfree(pstat);
- return ret;
-}
-
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- int rc;
+ int rc = 0;
switch (cmd) {
case ICARSAMODEXPO: {
@@ -871,48 +865,48 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
- case ZDEVICESTATUS: {
- struct zcrypt_device_matrix *device_status;
+ case ZCRYPT_DEVICE_STATUS: {
+ struct zcrypt_device_status_ext *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext);
- device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
- GFP_KERNEL);
+ device_status = kzalloc(total_size, GFP_KERNEL);
if (!device_status)
return -ENOMEM;
-
- zcrypt_device_status_mask(device_status);
-
+ zcrypt_device_status_mask_ext(device_status);
if (copy_to_user((char __user *) arg, device_status,
- sizeof(struct zcrypt_device_matrix))) {
- kfree(device_status);
- return -EFAULT;
- }
-
+ total_size))
+ rc = -EFAULT;
kfree(device_status);
- return 0;
+ return rc;
}
- case Z90STAT_STATUS_MASK: {
+ case ZCRYPT_STATUS_MASK: {
char status[AP_DEVICES];
- zcrypt_status_mask(status);
- if (copy_to_user((char __user *) arg, status,
- sizeof(char) * AP_DEVICES))
+
+ zcrypt_status_mask(status, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
- case Z90STAT_QDEPTH_MASK: {
+ case ZCRYPT_QDEPTH_MASK: {
char qdepth[AP_DEVICES];
- zcrypt_qdepth_mask(qdepth);
- if (copy_to_user((char __user *) arg, qdepth,
- sizeof(char) * AP_DEVICES))
+
+ zcrypt_qdepth_mask(qdepth, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
- case Z90STAT_PERDEV_REQCNT: {
- int reqcnt[AP_DEVICES];
- zcrypt_perdev_reqcnt(reqcnt);
- if (copy_to_user((int __user *) arg, reqcnt,
- sizeof(int) * AP_DEVICES))
- return -EFAULT;
- return 0;
+ case ZCRYPT_PERDEV_REQCNT: {
+ int *reqcnt;
+
+ reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ if (!reqcnt)
+ return -ENOMEM;
+ zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(int) * AP_DEVICES))
+ rc = -EFAULT;
+ kfree(reqcnt);
+ return rc;
}
case Z90STAT_REQUESTQ_COUNT:
return put_user(zcrypt_requestq_count(), (int __user *) arg);
@@ -924,38 +918,54 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case Z90STAT_DOMAIN_INDEX:
return put_user(ap_domain_index, (int __user *) arg);
/*
- * Deprecated ioctls. Don't add another device count ioctl,
- * you can count them yourself in the user space with the
- * output of the Z90STAT_STATUS_MASK ioctl.
+ * Deprecated ioctls
*/
- case ICAZ90STATUS:
- return zcrypt_ica_status(filp, arg);
- case Z90STAT_TOTALCOUNT:
- return put_user(zcrypt_device_count, (int __user *) arg);
- case Z90STAT_PCICACOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCICA),
- (int __user *) arg);
- case Z90STAT_PCICCCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCICC),
- (int __user *) arg);
- case Z90STAT_PCIXCCMCL2COUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
- (int __user *) arg);
- case Z90STAT_PCIXCCMCL3COUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
- (int __user *) arg);
- case Z90STAT_PCIXCCCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
- (int __user *) arg);
- case Z90STAT_CEX2CCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
- (int __user *) arg);
- case Z90STAT_CEX2ACOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
- (int __user *) arg);
+ case ZDEVICESTATUS: {
+ /* the old ioctl supports only 64 adapters */
+ struct zcrypt_device_status *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status);
+
+ device_status = kzalloc(total_size, GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask(device_status);
+ if (copy_to_user((char __user *) arg, device_status,
+ total_size))
+ rc = -EFAULT;
+ kfree(device_status);
+ return rc;
+ }
+ case Z90STAT_STATUS_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char status[MAX_ZDEV_CARDIDS];
+
+ zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_QDEPTH_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char qdepth[MAX_ZDEV_CARDIDS];
+
+ zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_PERDEV_REQCNT: {
+ /* the old ioctl supports only 64 adapters */
+ int reqcnt[MAX_ZDEV_CARDIDS];
+
+ zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ return -EFAULT;
+ return 0;
+ }
+ /* unknown ioctl number */
default:
- /* unknown ioctl number */
+ ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
@@ -1152,201 +1162,6 @@ static struct miscdevice zcrypt_misc_device = {
.fops = &zcrypt_fops,
};
-/*
- * Deprecated /proc entry support.
- */
-static struct proc_dir_entry *zcrypt_entry;
-
-static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- seq_printf(m, "%01x", (unsigned int) addr[i]);
- seq_putc(m, ' ');
-}
-
-static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
- int inl, c, cx;
-
- seq_printf(m, " ");
- inl = 0;
- for (c = 0; c < (len / 16); c++) {
- sprintcl(m, addr+inl, 16);
- inl += 16;
- }
- cx = len%16;
- if (cx) {
- sprintcl(m, addr+inl, cx);
- inl += cx;
- }
- seq_putc(m, '\n');
-}
-
-static void sprinthx(unsigned char *title, struct seq_file *m,
- unsigned char *addr, unsigned int len)
-{
- int inl, r, rx;
-
- seq_printf(m, "\n%s\n", title);
- inl = 0;
- for (r = 0; r < (len / 64); r++) {
- sprintrw(m, addr+inl, 64);
- inl += 64;
- }
- rx = len % 64;
- if (rx) {
- sprintrw(m, addr+inl, rx);
- inl += rx;
- }
- seq_putc(m, '\n');
-}
-
-static void sprinthx4(unsigned char *title, struct seq_file *m,
- unsigned int *array, unsigned int len)
-{
- seq_printf(m, "\n%s\n", title);
- seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
- seq_putc(m, '\n');
-}
-
-static int zcrypt_proc_show(struct seq_file *m, void *v)
-{
- char workarea[sizeof(int) * AP_DEVICES];
-
- seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
- ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
- seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
- seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
- seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
- seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
- seq_printf(m, "PCIXCC MCL2 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
- seq_printf(m, "PCIXCC MCL3 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
- seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
- seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
- seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
- seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
- seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
- seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
- seq_printf(m, "Total open handles: %d\n\n",
- atomic_read(&zcrypt_open_count));
- zcrypt_status_mask(workarea);
- sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
- m, workarea, AP_DEVICES);
- zcrypt_qdepth_mask(workarea);
- sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
- zcrypt_perdev_reqcnt((int *) workarea);
- sprinthx4("Per-device successfully completed request counts",
- m, (unsigned int *) workarea, AP_DEVICES);
- return 0;
-}
-
-static int zcrypt_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zcrypt_proc_show, NULL);
-}
-
-static void zcrypt_disable_card(int index)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
-
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- zq->online = 0;
- ap_flush_queue(zq->queue);
- }
- }
- spin_unlock(&zcrypt_list_lock);
-}
-
-static void zcrypt_enable_card(int index)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
-
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- zq->online = 1;
- ap_flush_queue(zq->queue);
- }
- }
- spin_unlock(&zcrypt_list_lock);
-}
-
-static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- unsigned char *lbuf, *ptr;
- size_t local_count;
- int j;
-
- if (count <= 0)
- return 0;
-
-#define LBUFSIZE 1200UL
- lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
- if (!lbuf)
- return 0;
-
- local_count = min(LBUFSIZE - 1, count);
- if (copy_from_user(lbuf, buffer, local_count) != 0) {
- kfree(lbuf);
- return -EFAULT;
- }
- lbuf[local_count] = '\0';
-
- ptr = strstr(lbuf, "Online devices");
- if (!ptr)
- goto out;
- ptr = strstr(ptr, "\n");
- if (!ptr)
- goto out;
- ptr++;
-
- if (strstr(ptr, "Waiting work element counts") == NULL)
- goto out;
-
- for (j = 0; j < 64 && *ptr; ptr++) {
- /*
- * '0' for no device, '1' for PCICA, '2' for PCICC,
- * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
- * '5' for CEX2C and '6' for CEX2A'
- * '7' for CEX3C and '8' for CEX3A
- */
- if (*ptr >= '0' && *ptr <= '8')
- j++;
- else if (*ptr == 'd' || *ptr == 'D')
- zcrypt_disable_card(j++);
- else if (*ptr == 'e' || *ptr == 'E')
- zcrypt_enable_card(j++);
- else if (*ptr != ' ' && *ptr != '\t')
- break;
- }
-out:
- kfree(lbuf);
- return count;
-}
-
-static const struct file_operations zcrypt_proc_fops = {
- .owner = THIS_MODULE,
- .open = zcrypt_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = zcrypt_proc_write,
-};
-
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
@@ -1448,27 +1263,15 @@ int __init zcrypt_api_init(void)
if (rc)
goto out;
- atomic_set(&zcrypt_rescan_req, 0);
-
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0)
goto out;
- /* Set up the proc file system */
- zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
- &zcrypt_proc_fops);
- if (!zcrypt_entry) {
- rc = -ENOMEM;
- goto out_misc;
- }
-
zcrypt_msgtype6_init();
zcrypt_msgtype50_init();
return 0;
-out_misc:
- misc_deregister(&zcrypt_misc_device);
out:
return rc;
}
@@ -1480,7 +1283,6 @@ out:
*/
void __exit zcrypt_api_exit(void)
{
- remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
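
For userspace, the net effect of the zcrypt_api.c rework is a new ZCRYPT_DEVICE_STATUS ioctl that reports up to MAX_ZDEV_ENTRIES_EXT (256 cards by 256 domains) entries, while the old 64-adapter ioctls remain as deprecated compatibility calls. A hedged usage sketch, assuming the corresponding uapi definitions (struct zcrypt_device_status_ext, MAX_ZDEV_ENTRIES_EXT, ZCRYPT_DEVICE_STATUS) from <asm/zcrypt.h>, which this series updates but which are not shown here:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

int main(void)
{
	struct zcrypt_device_status_ext *stat;
	int fd, i;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0) {
		perror("open /dev/z90crypt");
		return 1;
	}
	stat = calloc(MAX_ZDEV_ENTRIES_EXT, sizeof(*stat));
	if (!stat)
		return 1;
	if (ioctl(fd, ZCRYPT_DEVICE_STATUS, stat) == 0) {
		for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++)
			if (stat[i].online)
				printf("qid 0x%04x hwtype %u functions 0x%x\n",
				       stat[i].qid, stat[i].hwtype,
				       stat[i].functions);
	}
	free(stat);
	close(fd);
	return 0;
}
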
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 9fff8912f6e3..f149a8fee60d 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -21,30 +21,6 @@
#include <asm/zcrypt.h>
#include "ap_bus.h"
-/* deprecated status calls */
-#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
-#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
-
-/**
- * This structure is deprecated and the corresponding ioctl() has been
- * replaced with individual ioctl()s for each piece of data!
- */
-struct ica_z90_status {
- int totalcount;
- int leedslitecount; // PCICA
- int leeds2count; // PCICC
- // int PCIXCCCount; is not in struct for backward compatibility
- int requestqWaitCount;
- int pendingqWaitCount;
- int totalOpenCount;
- int cryptoDomain;
- // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
- // 5=CEX2C
- unsigned char status[64];
- // qdepth: # work elements waiting for each device
- unsigned char qdepth[64];
-};
-
/**
* device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
* PCIXCC_MCL3, CEX2C, or CEX2A
@@ -179,6 +155,6 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index f54bef4a928e..97d4bacbc442 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1084,6 +1084,13 @@ out_free:
return rc;
}
+/**
+ * Fetch function code from cprb.
+ * Extracting the fc requires copying the cprb from userspace.
+ * So this function allocates memory and needs an ap_msg prepared
+ * by the caller with ap_init_message(). Also the caller has to
+ * make sure ap_release_message() is always called even on failure.
+ */
unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned short **dom)
@@ -1091,9 +1098,7 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
- int rc;
- ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
@@ -1101,17 +1106,10 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private) {
- kzfree(ap_msg->message);
+ if (!ap_msg->private)
return -ENOMEM;
- }
memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
- rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
- if (rc) {
- kzfree(ap_msg->message);
- kzfree(ap_msg->private);
- }
- return rc;
+ return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
}
/**
@@ -1139,11 +1137,16 @@ static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
- kzfree(ap_msg->message);
- kzfree(ap_msg->private);
return rc;
}
+/**
+ * Fetch function code from ep11 cprb.
+ * Extracting the fc requires copying the ep11 cprb from userspace.
+ * So this function allocates memory and needs an ap_msg prepared
+ * by the caller with ap_init_message(). Also the caller has to
+ * make sure ap_release_message() is always called even on failure.
+ */
unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code)
@@ -1151,9 +1154,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_EP11,
};
- int rc;
- ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
@@ -1161,17 +1162,10 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private) {
- kzfree(ap_msg->message);
+ if (!ap_msg->private)
return -ENOMEM;
- }
memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
- rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
- if (rc) {
- kzfree(ap_msg->message);
- kzfree(ap_msg->private);
- }
- return rc;
+ return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
}
/**
@@ -1246,8 +1240,6 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
- kzfree(ap_msg->message);
- kzfree(ap_msg->private);
return rc;
}
@@ -1258,7 +1250,6 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
- ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
@@ -1266,10 +1257,8 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private) {
- kzfree(ap_msg->message);
+ if (!ap_msg->private)
return -ENOMEM;
- }
memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
@@ -1313,8 +1302,6 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
- kzfree(ap_msg->message);
- kzfree(ap_msg->private);
return rc;
}
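
The zcrypt_msgtype6 hunks above move ownership of the ap_message buffers to the caller: the get_*_fc() helpers no longer call ap_init_message() and no longer free message/private on error, and the send paths drop their kzfree() calls. A minimal caller sketch (hypothetical function name; it assumes ap_release_message(), added elsewhere in this series, frees both ap_msg->message and ap_msg->private):

	static long example_send_ep11(struct ep11_urb *xcrb)
	{
		struct ap_message ap_msg;
		unsigned int func_code;
		int rc;

		ap_init_message(&ap_msg);	/* caller owns the buffers from here on */
		rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
		if (rc)
			goto out;
		/* ... select a zcrypt queue and hand ap_msg to its send function ... */
	out:
		ap_release_message(&ap_msg);	/* releases message and private, also on error */
		return rc;
	}
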
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0ee8f33efb54..2d9fe7e4ee40 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1928,6 +1928,8 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
return -EINVAL;
/* TODO: sanity checks */
card->portno = value;
+ if (card->dev)
+ card->dev->dev_port = card->portno;
return count;
@@ -2158,6 +2160,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
card->dev = dev;
card->dev->ml_priv = card;
card->dev->netdev_ops = &lcs_netdev_ops;
+ card->dev->dev_port = card->portno;
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4326715dc13e..2a5fec55bf60 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -148,6 +148,7 @@ struct qeth_perf_stats {
unsigned int tx_csum;
unsigned int tx_lin;
unsigned int tx_linfail;
+ unsigned int rx_csum;
};
/* Routing stuff */
@@ -557,7 +558,6 @@ enum qeth_prot_versions {
enum qeth_cmd_buffer_state {
BUF_STATE_FREE,
BUF_STATE_LOCKED,
- BUF_STATE_PROCESSED,
};
enum qeth_cq {
@@ -601,7 +601,6 @@ struct qeth_channel {
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
atomic_t irq_pending;
int io_buf_no;
- int buf_no;
};
/**
@@ -714,9 +713,6 @@ enum qeth_discipline_id {
struct qeth_discipline {
const struct device_type *devtype;
- void (*start_poll)(struct ccw_device *, int, unsigned long);
- qdio_handler_t *input_handler;
- qdio_handler_t *output_handler;
int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
int (*recover)(void *ptr);
int (*setup) (struct ccwgroup_device *);
@@ -782,9 +778,9 @@ struct qeth_card {
struct qeth_card_options options;
wait_queue_head_t wait_q;
- spinlock_t vlanlock;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct mutex vid_list_mutex; /* vid_list */
struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
@@ -869,6 +865,32 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
+static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
+ u8 flags)
+{
+ if ((card->dev->features & NETIF_F_RXCSUM) &&
+ (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (card->options.performance_stats)
+ card->perf_stats.rx_csum++;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
+{
+ *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
+ *flags |= QETH_HDR_EXT_UDP;
+ if (ipv == 4) {
+ /* some HW requires combined L3+L4 csum offload: */
+ *flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+ ip_hdr(skb)->check = 0;
+ }
+}
+
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
@@ -881,6 +903,27 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
return card->info.diagass_support & (__u32)cmd;
}
+int qeth_send_simple_setassparms_prot(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data,
+ enum qeth_prot_versions prot);
+/* IPv4 variant */
+static inline int qeth_send_simple_setassparms(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data)
+{
+ return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
+ data, QETH_PROT_IPV4);
+}
+
+static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data)
+{
+ return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
+ data, QETH_PROT_IPV6);
+}
+
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -923,13 +966,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
-void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
int qeth_poll(struct napi_struct *napi, int budget);
-void qeth_qdio_input_handler(struct ccw_device *,
- unsigned int, unsigned int, int,
- int, unsigned long);
-void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
- int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
@@ -981,8 +1018,6 @@ int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
-int qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
- __u16, long);
int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
long,
int (*reply_cb)(struct qeth_card *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 19203340f879..9f28b6f2efc4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -10,6 +10,7 @@
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
@@ -32,7 +33,6 @@
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
-#include <asm/compat.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_put_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
- atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
qeth_release_buffer(channel, &channel->iob[cnt]);
- channel->buf_no = 0;
channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
- channel->buf_no = 0;
channel->io_buf_no = 0;
atomic_set(&channel->irq_pending, 0);
spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
{
int rc;
int cstat, dstat;
- struct qeth_cmd_buffer *buffer;
+ struct qeth_cmd_buffer *iob = NULL;
struct qeth_channel *channel;
struct qeth_card *card;
- struct qeth_cmd_buffer *iob;
- __u8 index;
-
- if (__qeth_check_irb_error(cdev, intparm, irb))
- return;
- cstat = irb->scsw.cmd.cstat;
- dstat = irb->scsw.cmd.dstat;
card = CARD_FROM_CDEV(cdev);
if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
}
+
+ if (qeth_intparm_is_iob(intparm))
+ iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+
+ if (__qeth_check_irb_error(cdev, intparm, irb)) {
+ /* IO was terminated, free its resources. */
+ if (iob)
+ qeth_release_buffer(iob->channel, iob);
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+ return;
+ }
+
atomic_set(&channel->irq_pending, 0);
if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* we don't have to handle this further */
intparm = 0;
}
+
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
(cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel->state = CH_STATE_RCD_DONE;
goto out;
}
- if (intparm) {
- buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- buffer->state = BUF_STATE_PROCESSED;
- }
if (channel == &card->data)
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
__qeth_issue_next_read(card);
- iob = channel->iob;
- index = channel->buf_no;
- while (iob[index].state == BUF_STATE_PROCESSED) {
- if (iob[index].callback != NULL)
- iob[index].callback(channel, iob + index);
+ if (iob && iob->callback)
+ iob->callback(iob->channel, iob);
- index = (index + 1) % QETH_CMD_BUFFER_NO;
- }
- channel->buf_no = index;
out:
wake_up(&card->wait_q);
return;
@@ -1369,7 +1366,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card)
static void qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
- struct channel_path_desc *chp_dsc;
+ struct channel_path_desc_fmt0 *chp_dsc;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@ -1470,13 +1467,13 @@ static int qeth_setup_card(struct qeth_card *card)
card->lan_online = 0;
card->read_or_write_problem = 0;
card->dev = NULL;
- spin_lock_init(&card->vlanlock);
spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
+ mutex_init(&card->vid_list_mutex);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- &channel->ccw, (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ (addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
if (channel->state != CH_STATE_UP) {
rc = -ETIME;
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- qeth_clear_cmd_buffers(channel);
} else
rc = 0;
return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- &channel->ccw, (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ (addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
- qeth_clear_cmd_buffers(channel);
return -ETIME;
}
return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ (addr_t) iob, 0, 0, event_timeout);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
}
- if (reply->rc == -EIO)
- goto error;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -2211,10 +2204,6 @@ time_err:
list_del_init(&reply->list);
spin_unlock_irqrestore(&reply->card->lock, flags);
atomic_inc(&reply->received);
-error:
- atomic_set(&card->write.irq_pending, 0);
- qeth_release_buffer(iob->channel, iob);
- card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
return rc;
}
-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 4, "defadpcb");
-
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
+ if (!cmd->hdr.return_code)
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- return 0;
+ return cmd->hdr.return_code;
}
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 3, "quyadpcb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
}
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
- return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+ return 0;
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- struct qeth_switch_info *sw_info;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_query_switch_attributes *attrs;
+ struct qeth_switch_info *sw_info;
QETH_CARD_TEXT(card, 2, "qswiatcb");
- cmd = (struct qeth_ipa_cmd *) data;
- sw_info = (struct qeth_switch_info *)reply->param;
- if (cmd->data.setadapterparms.hdr.return_code == 0) {
- attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
- sw_info->capabilities = attrs->capabilities;
- sw_info->settings = attrs->settings;
- QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
- sw_info->settings);
- }
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
+ sw_info = (struct qeth_switch_info *)reply->param;
+ attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+ sw_info->capabilities = attrs->capabilities;
+ sw_info->settings = attrs->settings;
+ QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+ sw_info->settings);
return 0;
}
@@ -3606,15 +3588,14 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
}
}
-void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
- unsigned long card_ptr)
+static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
+ unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
if (card->dev && (card->dev->flags & IFF_UP))
napi_schedule(&card->napi);
}
-EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
@@ -3716,9 +3697,10 @@ out:
return;
}
-void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
- unsigned int queue, int first_elem, int count,
- unsigned long card_ptr)
+static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
+ unsigned int qdio_err, int queue,
+ int first_elem, int count,
+ unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
@@ -3729,14 +3711,12 @@ void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
else if (qdio_err)
qeth_schedule_recovery(card);
-
-
}
-EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
-void qeth_qdio_output_handler(struct ccw_device *ccwdev,
- unsigned int qdio_error, int __queue, int first_element,
- int count, unsigned long card_ptr)
+static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
+ unsigned int qdio_error, int __queue,
+ int first_element, int count,
+ unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -3805,7 +3785,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
card->perf_stats.outbound_handler_time += qeth_get_micros() -
card->perf_stats.outbound_handler_start_time;
}
-EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
@@ -4207,16 +4186,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_setadpparms *setparms;
QETH_CARD_TEXT(card, 4, "prmadpcb");
- cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
-
- qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
- if (cmd->hdr.return_code) {
+ if (qeth_setadpparms_inspect_rc(cmd)) {
QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
@@ -4286,18 +4262,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 4, "chgmaccb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
!(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
ether_addr_copy(card->dev->dev_addr,
cmd->data.setadapterparms.data.change_addr.addr);
card->info.mac_bits |= QETH_LAYER2_MAC_READ;
}
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
@@ -4328,13 +4304,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_set_access_ctrl *access_ctrl_req;
int fallback = *(int *)reply->param;
QETH_CARD_TEXT(card, 4, "setaccb");
+ if (cmd->hdr.return_code)
+ return 0;
+ qeth_setadpparms_inspect_rc(cmd);
- cmd = (struct qeth_ipa_cmd *) data;
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
QETH_DBF_TEXT_(SETUP, 2, "setaccb");
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4385,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
card->options.isolation = card->options.prev_isolation;
break;
}
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
@@ -4695,14 +4672,15 @@ out:
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_qoat_priv *priv;
char *resdata;
int resdatalen;
QETH_CARD_TEXT(card, 3, "qoatcb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *)data;
priv = (struct qeth_qoat_priv *)reply->param;
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
resdata = (char *)data + 28;
@@ -4796,21 +4774,18 @@ out:
static int qeth_query_card_info_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_query_card_info *card_info;
- struct carrier_info *carrier_info;
QETH_CARD_TEXT(card, 2, "qcrdincb");
- carrier_info = (struct carrier_info *)reply->param;
- cmd = (struct qeth_ipa_cmd *)data;
- card_info = &cmd->data.setadapterparms.data.card_info;
- if (cmd->data.setadapterparms.hdr.return_code == 0) {
- carrier_info->card_type = card_info->card_type;
- carrier_info->port_mode = card_info->port_mode;
- carrier_info->port_speed = card_info->port_speed;
- }
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ card_info = &cmd->data.setadapterparms.data.card_info;
+ carrier_info->card_type = card_info->card_type;
+ carrier_info->port_mode = card_info->port_mode;
+ carrier_info->port_speed = card_info->port_speed;
return 0;
}
@@ -4857,7 +4832,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_DDEV(card), &id);
+ ccw_device_get_id(CARD_RDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
@@ -5017,7 +4992,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
goto out_free_in_sbals;
}
for (i = 0; i < card->qdio.no_in_queues; ++i)
- queue_start_poll[i] = card->discipline->start_poll;
+ queue_start_poll[i] = qeth_qdio_start_poll;
qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
@@ -5041,8 +5016,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.qib_param_field = qib_param_field;
init_data.no_input_qs = card->qdio.no_in_queues;
init_data.no_output_qs = card->qdio.no_out_queues;
- init_data.input_handler = card->discipline->input_handler;
- init_data.output_handler = card->discipline->output_handler;
+ init_data.input_handler = qeth_qdio_input_handler;
+ init_data.output_handler = qeth_qdio_output_handler;
init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
@@ -5226,6 +5201,11 @@ retriable:
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
if (rc == -ENOMEM)
goto out;
+ if (qeth_is_supported(card, IPA_IPV6)) {
+ rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
+ if (rc == -ENOMEM)
+ goto out;
+ }
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
@@ -5533,26 +5513,26 @@ int qeth_send_setassparms(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_send_setassparms);
-int qeth_send_simple_setassparms(struct qeth_card *card,
- enum qeth_ipa_funcs ipa_func,
- __u16 cmd_code, long data)
+int qeth_send_simple_setassparms_prot(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data,
+ enum qeth_prot_versions prot)
{
int rc;
int length = 0;
struct qeth_cmd_buffer *iob;
- QETH_CARD_TEXT(card, 4, "simassp4");
+ QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
if (data)
length = sizeof(__u32);
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- length, QETH_PROT_IPV4);
+ iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
rc = qeth_send_setassparms(card, iob, length, data,
qeth_setassparms_cb, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms);
+EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
@@ -6030,7 +6010,8 @@ static struct {
{"tx lin"},
{"tx linfail"},
{"cq handler count"},
- {"cq handler time"}
+ {"cq handler time"},
+ {"rx csum"}
};
int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -6092,6 +6073,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[35] = card->perf_stats.tx_linfail;
data[36] = card->perf_stats.cq_cnt;
data[37] = card->perf_stats.cq_time;
+ data[38] = card->perf_stats.rx_csum;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
@@ -6348,14 +6330,15 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
__u16 cmd_code, long data,
- struct qeth_checksum_cmd *chksum_cb)
+ struct qeth_checksum_cmd *chksum_cb,
+ enum qeth_prot_versions prot)
{
struct qeth_cmd_buffer *iob;
int rc = -ENOMEM;
QETH_CARD_TEXT(card, 4, "chkdocmd");
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- sizeof(__u32), QETH_PROT_IPV4);
+ sizeof(__u32), prot);
if (iob)
rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
qeth_ipa_checksum_run_cmd_cb,
@@ -6363,16 +6346,17 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
return rc;
}
-static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
+static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
+ enum qeth_prot_versions prot)
{
- const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR |
- QETH_IPA_CHECKSUM_UDP |
- QETH_IPA_CHECKSUM_TCP;
+ u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
struct qeth_checksum_cmd chksum_cb;
int rc;
+ if (prot == QETH_PROT_IPV4)
+ required_features |= QETH_IPA_CHECKSUM_IP_HDR;
rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
- &chksum_cb);
+ &chksum_cb, prot);
if (!rc) {
if ((required_features & chksum_cb.supported) !=
required_features)
@@ -6384,37 +6368,42 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
QETH_CARD_IFNAME(card));
}
if (rc) {
- qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
+ qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0, prot);
dev_warn(&card->gdev->dev,
- "Starting HW checksumming for %s failed, using SW checksumming\n",
- QETH_CARD_IFNAME(card));
+ "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
+ prot, QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
- chksum_cb.supported, &chksum_cb);
+ chksum_cb.supported, &chksum_cb,
+ prot);
if (!rc) {
if ((required_features & chksum_cb.enabled) !=
required_features)
rc = -EIO;
}
if (rc) {
- qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
+ qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0, prot);
dev_warn(&card->gdev->dev,
- "Enabling HW checksumming for %s failed, using SW checksumming\n",
- QETH_CARD_IFNAME(card));
+ "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
+ prot, QETH_CARD_IFNAME(card));
return rc;
}
- dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n",
- cstype == IPA_INBOUND_CHECKSUM ? "in" : "out");
+ dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
+ cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
return 0;
}
-static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
+static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
+ enum qeth_prot_versions prot)
{
- int rc = (on) ? qeth_send_checksum_on(card, cstype)
- : qeth_send_simple_setassparms(card, cstype,
- IPA_CMD_ASS_STOP, 0);
+ int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
+ : qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0,
+ prot);
return rc ? -EIO : 0;
}
@@ -6441,8 +6430,31 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
return rc;
}
-#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO)
+static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
+{
+ int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
+ int rc_ipv6;
+
+ if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+ rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
+ QETH_PROT_IPV4);
+ if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
+		/* at most one Offload Assist available, so its rc is the result */
+ return rc_ipv4;
+
+ rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
+ QETH_PROT_IPV6);
+ if (on)
+ /* enable: success if any Assist is active */
+ return (rc_ipv6) ? rc_ipv4 : 0;
+
+ /* disable: failure if any Assist is still active */
+ return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
+}
+
+#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
+ NETIF_F_IPV6_CSUM)
/**
* qeth_recover_features() - Restore device features after recovery
* @dev: the recovering net_device
@@ -6477,16 +6489,19 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
if ((changed & NETIF_F_IP_CSUM)) {
- rc = qeth_set_ipa_csum(card,
- features & NETIF_F_IP_CSUM ? 1 : 0,
- IPA_OUTBOUND_CHECKSUM);
+ rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
if (rc)
changed ^= NETIF_F_IP_CSUM;
}
- if ((changed & NETIF_F_RXCSUM)) {
- rc = qeth_set_ipa_csum(card,
- features & NETIF_F_RXCSUM ? 1 : 0,
- IPA_INBOUND_CHECKSUM);
+ if (changed & NETIF_F_IPV6_CSUM) {
+ rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+ if (rc)
+ changed ^= NETIF_F_IPV6_CSUM;
+ }
+ if (changed & NETIF_F_RXCSUM) {
+ rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
if (rc)
changed ^= NETIF_F_RXCSUM;
}
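
Reading the new qeth_set_ipa_rx_csum() together with its caller above: when the IPv6 Inbound Checksum Assist is absent, the IPv4 result (or -EOPNOTSUPP if that Assist is missing too) is returned as-is. With both Assists present, enabling is reported as success as soon as either protocol comes up (for example rc_ipv4 = -EIO with rc_ipv6 = 0 still returns 0, so qeth_set_features() does not revert the NETIF_F_RXCSUM change), while disabling is reported as failure if either Assist could not be stopped.
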
@@ -6513,7 +6528,10 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
QETH_DBF_TEXT(SETUP, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
- if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+ if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
+ features &= ~NETIF_F_IPV6_CSUM;
+ if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
+ !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
@@ -6563,10 +6581,14 @@ static int __init qeth_core_init(void)
mutex_init(&qeth_mod_mutex);
qeth_wq = create_singlethread_workqueue("qeth_wq");
+ if (!qeth_wq) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
rc = qeth_register_dbf_views();
if (rc)
- goto out_err;
+ goto dbf_err;
qeth_core_root_dev = root_device_register("qeth");
rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
@@ -6603,6 +6625,8 @@ slab_err:
root_device_unregister(qeth_core_root_dev);
register_err:
qeth_unregister_dbf_views();
+dbf_err:
+ destroy_workqueue(qeth_wq);
out_err:
pr_err("Initializing the qeth device driver failed\n");
return rc;
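
Taken together, the qeth_core_init() hunks give module initialisation a symmetric unwind ladder. A rough sketch reconstructed from the hunks above (intermediate registration steps elided):

	qeth_wq = create_singlethread_workqueue("qeth_wq");
	if (!qeth_wq) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	/* ... root device, bus driver, sysfs and slab setup ... */

	register_err:
		qeth_unregister_dbf_views();
	dbf_err:
		destroy_workqueue(qeth_wq);	/* new label: undo the workqueue creation */
	out_err:
		pr_err("Initializing the qeth device driver failed\n");
		return rc;
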
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 619f897b4bb0..878e62f35169 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_HALT_CHANNEL_PARM -11
#define QETH_RCD_PARM -12
+static inline bool qeth_intparm_is_iob(unsigned long intparm)
+{
+ switch (intparm) {
+ case QETH_CLEAR_CHANNEL_PARM:
+ case QETH_HALT_CHANNEL_PARM:
+ case QETH_RCD_PARM:
+ case 0:
+ return false;
+ }
+ return true;
+}
+
/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
@@ -234,6 +246,8 @@ enum qeth_ipa_funcs {
IPA_QUERY_ARP_ASSIST = 0x00040000L,
IPA_INBOUND_TSO = 0x00080000L,
IPA_OUTBOUND_TSO = 0x00100000L,
+ IPA_INBOUND_CHECKSUM_V6 = 0x00400000L,
+ IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L,
};
/* SETIP/DELIP IPA Command: ***************************************************/
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index ae81534de912..c3f18afb368b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -144,6 +144,8 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
goto out;
}
card->info.portno = portno;
+ if (card->dev)
+ card->dev->dev_port = portno;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 50a313806dde..a7cb37da6a21 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,11 +17,9 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
-#include <linux/ip.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
-#include <linux/string.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -122,13 +120,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Setmac");
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- ether_addr_copy(card->dev->dev_addr, mac);
dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ mac, card->dev->name);
} else {
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
switch (rc) {
case -EEXIST:
dev_warn(&card->gdev->dev,
@@ -143,19 +138,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Delmac");
- if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
- if (rc == 0)
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- return rc;
-}
-
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -212,23 +194,6 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return RTN_UNSPEC;
}
-static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* tcph->check contains already the pseudo hdr checksum
- * so just set the header flags
- */
- if (iph->protocol == IPPROTO_UDP)
- hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_UDP;
- hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
- QETH_HDR_EXT_CSUM_HDR_REQ;
- iph->check = 0;
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
-}
-
static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
int cast_type, unsigned int data_len)
{
@@ -314,12 +279,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
static void qeth_l2_process_vlans(struct qeth_card *card)
{
struct qeth_vlan_vid *id;
+
QETH_CARD_TEXT(card, 3, "L2prcvln");
- spin_lock_bh(&card->vlanlock);
+ mutex_lock(&card->vid_list_mutex);
list_for_each_entry(id, &card->vid_list, list) {
qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
}
- spin_unlock_bh(&card->vlanlock);
+ mutex_unlock(&card->vid_list_mutex);
}
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
@@ -336,7 +302,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
QETH_CARD_TEXT(card, 3, "aidREC");
return 0;
}
- id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
if (id) {
id->vid = vid;
rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
@@ -344,9 +310,9 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
kfree(id);
return rc;
}
- spin_lock_bh(&card->vlanlock);
+ mutex_lock(&card->vid_list_mutex);
list_add_tail(&id->list, &card->vid_list);
- spin_unlock_bh(&card->vlanlock);
+ mutex_unlock(&card->vid_list_mutex);
} else {
return -ENOMEM;
}
@@ -365,7 +331,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
}
- spin_lock_bh(&card->vlanlock);
+ mutex_lock(&card->vid_list_mutex);
list_for_each_entry(id, &card->vid_list, list) {
if (id->vid == vid) {
list_del(&id->list);
@@ -373,7 +339,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
break;
}
}
- spin_unlock_bh(&card->vlanlock);
+ mutex_unlock(&card->vid_list_mutex);
if (tmpid) {
rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
@@ -440,15 +406,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb->protocol = eth_type_trans(skb, skb->dev);
- if ((card->dev->features & NETIF_F_RXCSUM)
- && ((hdr->hdr.l2.flags[1] &
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
+ qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
if (skb->protocol == htons(ETH_P_802_2))
*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
len = skb->len;
@@ -481,7 +439,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
- char vendor_pre[] = {0x02, 0x00, 0x00};
QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
@@ -501,16 +458,20 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
card->info.type == QETH_CARD_TYPE_OSX ||
card->info.guestlan) {
rc = qeth_setadpparms_change_macaddr(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
- "device %s: x%x\n", CARD_BUS_ID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
- return rc;
- }
- } else {
- eth_random_addr(card->dev->dev_addr);
- memcpy(card->dev->dev_addr, vendor_pre, 3);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
+ /* fall back once more: */
}
+
+ /* some devices don't support a custom MAC address: */
+ if (card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX)
+ return (rc) ? rc : -EADDRNOTAVAIL;
+ eth_hw_addr_random(card->dev);
+
out:
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
@@ -520,6 +481,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct qeth_card *card = dev->ml_priv;
+ u8 old_addr[ETH_ALEN];
int rc = 0;
QETH_CARD_TEXT(card, 3, "setmac");
@@ -531,14 +493,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
}
- rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == -ENOENT))
- rc = qeth_l2_send_setmac(card, addr->sa_data);
- return rc ? -EINVAL : 0;
+
+ if (!qeth_card_hw_is_reachable(card)) {
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+ }
+
+ /* don't register the same address twice */
+ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
+ (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return 0;
+
+ /* add the new address, switch over, drop the old */
+ rc = qeth_l2_send_setmac(card, addr->sa_data);
+ if (rc)
+ return rc;
+ ether_addr_copy(old_addr, dev->dev_addr);
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ qeth_l2_remove_mac(card, old_addr);
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ return 0;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -680,7 +663,8 @@ out:
}
static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type)
+ struct qeth_qdio_out_q *queue, int cast_type,
+ int ipv)
{
int push_len = sizeof(struct qeth_hdr);
unsigned int elements, nr_frags;
@@ -718,8 +702,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
hdr_elements = 1;
}
qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_l2_hdr_csum(card, hdr, skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
if (!elements) {
@@ -771,6 +758,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
int cast_type = qeth_l2_get_cast_type(card, skb);
+ int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
@@ -778,7 +766,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
- qeth_get_ip_version(skb), cast_type)];
+ ipv, cast_type)];
else
queue = card->qdio.out_qs[card->qdio.default_out_queue];
@@ -801,7 +789,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
break;
default:
- rc = qeth_l2_xmit_osa(card, skb, queue, cast_type);
+ rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
}
if (!rc) {
@@ -978,6 +966,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->mtu = card->info.initial_mtu;
card->dev->min_mtu = 64;
card->dev->max_mtu = ETH_MAX_MTU;
+ card->dev->dev_port = card->info.portno;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
if (card->info.type == QETH_CARD_TYPE_OSN) {
card->dev->ethtool_ops = &qeth_l2_osn_ops;
@@ -1006,10 +995,15 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_IP_CSUM;
card->dev->vlan_features |= NETIF_F_IP_CSUM;
}
- if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
- card->dev->hw_features |= NETIF_F_RXCSUM;
- card->dev->vlan_features |= NETIF_F_RXCSUM;
- }
+ }
+ if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
+ card->dev->hw_features |= NETIF_F_IPV6_CSUM;
+ card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
+ }
+ if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
+ qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
+ card->dev->hw_features |= NETIF_F_RXCSUM;
+ card->dev->vlan_features |= NETIF_F_RXCSUM;
}
card->info.broadcast_capable = 1;
@@ -1068,8 +1062,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -1309,9 +1304,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l2_discipline = {
.devtype = &qeth_l2_devtype,
- .start_poll = qeth_qdio_start_poll,
- .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
- .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
.process_rx_buffer = qeth_l2_process_inbound_buffer,
.recover = qeth_l2_recover,
.setup = qeth_l2_probe_device,
@@ -1339,8 +1331,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
qeth_prepare_control_data(card, len, iob);
QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1a16a74aa83..e7fa479adf47 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -735,22 +735,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
return rc;
}
-static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
- enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
-{
- int rc;
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 4, "simassp6");
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- 0, QETH_PROT_IPV6);
- if (!iob)
- return -ENOMEM;
- rc = qeth_send_setassparms(card, iob, 0, 0,
- qeth_setassparms_cb, NULL);
- return rc;
-}
-
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
int rc;
@@ -851,14 +835,6 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "softipv6");
- rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
- if (rc) {
- dev_err(&card->gdev->dev,
- "Activating IPv6 support for %s failed\n",
- QETH_CARD_IFNAME(card));
- return rc;
- }
-
if (card->info.type == QETH_CARD_TYPE_IQD)
goto out;
@@ -870,16 +846,16 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
- IPA_CMD_ASS_START);
+ rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
+ IPA_CMD_ASS_START, 0);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
- IPA_CMD_ASS_START);
+ rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
+ IPA_CMD_ASS_START, 0);
if (rc) {
dev_warn(&card->gdev->dev,
"Enabling the passthrough mode for %s failed\n",
@@ -1293,91 +1269,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
in6_dev_put(in6_dev);
}
-static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
- unsigned short vid)
-{
- struct in_device *in_dev;
- struct in_ifaddr *ifa;
- struct qeth_ipaddr *addr;
- struct net_device *netdev;
-
- QETH_CARD_TEXT(card, 4, "frvaddr4");
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
- if (!netdev)
- return;
- in_dev = in_dev_get(netdev);
- if (!in_dev)
- return;
-
- addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!addr)
- goto out;
-
- spin_lock_bh(&card->ip_lock);
-
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
- addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);
- addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask);
- addr->type = QETH_IP_TYPE_NORMAL;
- qeth_l3_delete_ip(card, addr);
- }
-
- spin_unlock_bh(&card->ip_lock);
-
- kfree(addr);
-out:
- in_dev_put(in_dev);
-}
-
-static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
- unsigned short vid)
-{
- struct inet6_dev *in6_dev;
- struct inet6_ifaddr *ifa;
- struct qeth_ipaddr *addr;
- struct net_device *netdev;
-
- QETH_CARD_TEXT(card, 4, "frvaddr6");
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
- if (!netdev)
- return;
-
- in6_dev = in6_dev_get(netdev);
- if (!in6_dev)
- return;
-
- addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!addr)
- goto out;
-
- spin_lock_bh(&card->ip_lock);
-
- list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
- memcpy(&addr->u.a6.addr, &ifa->addr,
- sizeof(struct in6_addr));
- addr->u.a6.pfxlen = ifa->prefix_len;
- addr->type = QETH_IP_TYPE_NORMAL;
- qeth_l3_delete_ip(card, addr);
- }
-
- spin_unlock_bh(&card->ip_lock);
-
- kfree(addr);
-out:
- in6_dev_put(in6_dev);
-}
-
-static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
- unsigned short vid)
-{
- rcu_read_lock();
- qeth_l3_free_vlan_addresses4(card, vid);
- qeth_l3_free_vlan_addresses6(card, vid);
- rcu_read_unlock();
-}
-
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
@@ -1398,8 +1289,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
}
- /* unregister IP addresses of vlan device */
- qeth_l3_free_vlan_addresses(card, vid);
clear_bit(vid, card->active_vlans);
qeth_l3_set_rx_mode(dev);
return 0;
@@ -1454,17 +1343,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
}
- if (card->dev->features & NETIF_F_RXCSUM) {
- if ((hdr->hdr.l3.ext_flags &
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
- } else
- skb->ip_summed = CHECKSUM_NONE;
+ qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
}
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
@@ -2210,23 +2089,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
-static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* tcph->check contains already the pseudo hdr checksum
- * so just set the header flags
- */
- if (iph->protocol == IPPROTO_UDP)
- hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
- hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
- QETH_HDR_EXT_CSUM_HDR_REQ;
- iph->check = 0;
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
-}
-
static void qeth_tso_fill_header(struct qeth_card *card,
struct qeth_hdr *qhdr, struct sk_buff *skb)
{
@@ -2418,8 +2280,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
}
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_l3_hdr_csum(card, hdr, new_skb);
+ if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
}
elements = use_tso ?
@@ -2620,28 +2485,32 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
pr_info("qeth_l3: ignoring TR device\n");
return -ENODEV;
- } else {
- card->dev = alloc_etherdev(0);
- if (!card->dev)
- return -ENODEV;
- card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
-
- /*IPv6 address autoconfiguration stuff*/
- qeth_l3_get_unique_id(card);
- if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
- card->dev->dev_id = card->info.unique_id &
- 0xffff;
-
- card->dev->hw_features |= NETIF_F_SG;
- card->dev->vlan_features |= NETIF_F_SG;
-
- if (!card->info.guestlan) {
- card->dev->features |= NETIF_F_SG;
- card->dev->hw_features |= NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- card->dev->vlan_features |= NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- }
+ }
+
+ card->dev = alloc_etherdev(0);
+ if (!card->dev)
+ return -ENODEV;
+ card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
+
+ /*IPv6 address autoconfiguration stuff*/
+ qeth_l3_get_unique_id(card);
+ if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
+ card->dev->dev_id = card->info.unique_id & 0xffff;
+
+ card->dev->hw_features |= NETIF_F_SG;
+ card->dev->vlan_features |= NETIF_F_SG;
+
+ if (!card->info.guestlan) {
+ card->dev->features |= NETIF_F_SG;
+ card->dev->hw_features |= NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ card->dev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ }
+
+ if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
+ card->dev->hw_features |= NETIF_F_IPV6_CSUM;
+ card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
@@ -2663,6 +2532,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->mtu = card->info.initial_mtu;
card->dev->min_mtu = 64;
card->dev->max_mtu = ETH_MAX_MTU;
+ card->dev->dev_port = card->info.portno;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -2960,9 +2830,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
- .start_poll = qeth_qdio_start_poll,
- .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
- .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
.process_rx_buffer = qeth_l3_process_inbound_buffer,
.recover = qeth_l3_recover,
.setup = qeth_l3_probe_device,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3b0c8b8a7634..066b5c3aaae6 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = {
static void __exit smsg_exit(void)
{
- cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+ cpcmd("SET SMSG OFF", NULL, 0, NULL);
device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a8b831000b2d..18c4f933e8b9 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev,
+ u8 want, u8 need)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&adapter->erp_lock, flags);
+ zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+ read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
/**
* zfcp_dbf_rec_run_lvl - trace event related to running recovery
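
As the kernel-doc above spells out, zfcp_dbf_rec_trig() remains for call sites that already hold adapter->erp_lock, while callers without the lock, such as the rport paths changed below in zfcp_scsi.c, switch to the new zfcp_dbf_rec_trig_lock() wrapper. Illustratively (argument values are placeholders):

	/* caller already under read_lock(&adapter->erp_lock): */
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);

	/* caller without the erp_lock, e.g. the SCSI rport add/block paths: */
	zfcp_dbf_rec_trig_lock(tag, adapter, port, sdev, want, need);
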
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bf8ea4df2bb8..e5eed8aac0ce 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_EXT_H
@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev, u8 want, u8 need);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 4d2ba5682493..22f9562f415c 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
- zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;
if (rport) {
- zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}