author     Jiri Kosina <jkosina@suse.cz>             2018-06-08 11:20:42 +0300
committer  Jiri Kosina <jkosina@suse.cz>             2018-06-08 11:20:42 +0300
commit     c1144d29f405ce1f4e6ede6482beb3d0d09750c6 (patch)
tree       0f9fe36a50005bae6ffe28a4f978e71273f5b1d1 /drivers/s390
parent     d6c70a86bc72fabe7fc9d9533afdb46a56c16896 (diff)
parent     a317e559574b2af62095b39792d168cb98cb2561 (diff)
download   linux-c1144d29f405ce1f4e6ede6482beb3d0d09750c6.tar.xz
Merge branch 'for-4.18/alps' into for-linus
hid-alps driver cleanups with respect to t4_read_write_register() handling, from Christophe Jaillet
Diffstat (limited to 'drivers/s390')
42 files changed, 1150 insertions, 952 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 1444333210c7..9ac7574e3cfb 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -15,8 +15,8 @@ config BLK_DEV_XPRAM config DCSSBLK def_tristate m - select DAX select FS_DAX_LIMITED + select DAX_DRIVER prompt "DCSSBLK support" depends on S390 && BLOCK help diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index ecef8e73d40b..04143c08bd6e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3208,7 +3208,7 @@ static void dasd_setup_queue(struct dasd_block *block) } else { max = block->base->discipline->max_blocks << block->s2b_shift; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q->limits.max_dev_sectors = max; blk_queue_logical_block_size(q, logical_block_size); blk_queue_max_hw_sectors(q, max); @@ -3231,7 +3231,7 @@ static void dasd_setup_queue(struct dasd_block *block) blk_queue_max_discard_sectors(q, max_discard_sectors); blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } } @@ -3918,8 +3918,13 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) cqr = refers; } - if (cqr->block) - list_del_init(&cqr->blocklist); + /* + * _dasd_requeue_request already checked for a valid + * blockdevice, no need to check again + * all erp requests (cqr->refers) have a cqr->block + * pointer copy from the original cqr + */ + list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( cqr, (struct request *) cqr->callback_data); } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index ee14d8e45c97..ee73b0607e47 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -2214,15 +2214,28 @@ static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum) { int pos = pathmask_to_pos(lpum); + if (!(device->features & DASD_FEATURE_PATH_AUTODISABLE)) { + dev_err(&device->cdev->dev, + "Path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n", + device->path[pos].cssid, device->path[pos].chpid, lpum); + goto out; + } + /* no remaining path, cannot disable */ - if (!(dasd_path_get_opm(device) & ~lpum)) - return; + if (!(dasd_path_get_opm(device) & ~lpum)) { + dev_err(&device->cdev->dev, + "Last path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n", + device->path[pos].cssid, device->path[pos].chpid, lpum); + goto out; + } dev_err(&device->cdev->dev, "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n", device->path[pos].cssid, device->path[pos].chpid, lpum); dasd_path_remove_opm(device, lpum); dasd_path_add_ifccpm(device, lpum); + +out: device->path[pos].errorclk = 0; atomic_set(&device->path[pos].error_count, 0); } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index e7cd28ff1984..b9ebb565ee2c 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1550,9 +1550,49 @@ dasd_path_threshold_store(struct device *dev, struct device_attribute *attr, dasd_put_device(device); return count; } - static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show, dasd_path_threshold_store); + +/* + * configure if path is disabled after IFCC/CCC error threshold is + * exceeded + */ +static ssize_t +dasd_path_autodisable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dasd_devmap 
*devmap; + int flag; + + devmap = dasd_find_busid(dev_name(dev)); + if (!IS_ERR(devmap)) + flag = (devmap->features & DASD_FEATURE_PATH_AUTODISABLE) != 0; + else + flag = (DASD_FEATURE_DEFAULT & + DASD_FEATURE_PATH_AUTODISABLE) != 0; + return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n"); +} + +static ssize_t +dasd_path_autodisable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int rc; + + if (kstrtouint(buf, 0, &val) || val > 1) + return -EINVAL; + + rc = dasd_set_feature(to_ccwdev(dev), + DASD_FEATURE_PATH_AUTODISABLE, val); + + return rc ? : count; +} + +static DEVICE_ATTR(path_autodisable, 0644, + dasd_path_autodisable_show, + dasd_path_autodisable_store); /* * interval for IFCC/CCC checks * meaning time with no IFCC/CCC error before the error counter @@ -1623,6 +1663,7 @@ static struct attribute * dasd_attrs[] = { &dev_attr_host_access_count.attr, &dev_attr_path_masks.attr, &dev_attr_path_threshold.attr, + &dev_attr_path_autodisable.attr, &dev_attr_path_interval.attr, &dev_attr_path_reset.attr, &dev_attr_hpf.attr, diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index f035c2f25d35..131f1989f6f3 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -27,7 +27,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/vtoc.h> -#include <asm/diag.h> #include "dasd_int.h" #include "dasd_diag.h" diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 29397a9dba68..be208e7adcb4 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -214,24 +214,25 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) geo->head |= head; } -static int check_XRC(struct ccw1 *ccw, struct DE_eckd_data *data, +static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data, struct dasd_device *device) { struct dasd_eckd_private *private = device->private; int rc; - if (!private->rdc_data.facilities.XRC_supported) + rc = get_phys_clock(&data->ep_sys_time); + /* + * Ignore return code if XRC is not supported or + * sync clock is switched off + */ + if ((rc && !private->rdc_data.facilities.XRC_supported) || + rc == -EOPNOTSUPP || rc == -EACCES) return 0; /* switch on System Time Stamp - needed for XRC Support */ data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ - rc = get_phys_clock(&data->ep_sys_time); - /* Ignore return code if sync clock is switched off. 
*/ - if (rc == -EOPNOTSUPP || rc == -EACCES) - rc = 0; - if (ccw) { ccw->count = sizeof(struct DE_eckd_data); ccw->flags |= CCW_FLAG_SLI; @@ -286,12 +287,12 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk, case DASD_ECKD_CCW_WRITE_KD_MT: data->mask.perm = 0x02; data->attributes.operation = private->attrib.operation; - rc = check_XRC(ccw, data, device); + rc = set_timestamp(ccw, data, device); break; case DASD_ECKD_CCW_WRITE_CKD: case DASD_ECKD_CCW_WRITE_CKD_MT: data->attributes.operation = DASD_BYPASS_CACHE; - rc = check_XRC(ccw, data, device); + rc = set_timestamp(ccw, data, device); break; case DASD_ECKD_CCW_ERASE: case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: @@ -299,7 +300,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk, data->mask.perm = 0x3; data->mask.auth = 0x1; data->attributes.operation = DASD_BYPASS_CACHE; - rc = check_XRC(ccw, data, device); + rc = set_timestamp(ccw, data, device); break; case DASD_ECKD_CCW_WRITE_FULL_TRACK: data->mask.perm = 0x03; @@ -310,7 +311,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk, data->mask.perm = 0x02; data->attributes.operation = private->attrib.operation; data->blk_size = blksize; - rc = check_XRC(ccw, data, device); + rc = set_timestamp(ccw, data, device); break; default: dev_err(&device->cdev->dev, @@ -993,7 +994,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device) struct dasd_eckd_private *private, path_private; struct dasd_uid *uid; char print_path_uid[60], print_device_uid[60]; - struct channel_path_desc *chp_desc; + struct channel_path_desc_fmt0 *chp_desc; struct subchannel_id sch_id; private = device->private; @@ -3440,7 +3441,7 @@ static int prepare_itcw(struct itcw *itcw, dedata->mask.perm = 0x02; dedata->attributes.operation = basepriv->attrib.operation; dedata->blk_size = blksize; - rc = check_XRC(NULL, dedata, basedev); + rc = set_timestamp(NULL, dedata, basedev); dedata->ga_extended |= 0x42; lredata->operation.orientation = 0x0; lredata->operation.operation = 0x3F; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 9cae08b36b80..0a312e450207 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -633,7 +633,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->private_data = dev_info; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); - queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); + blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index b4130c7880d8..b1fcb76dd272 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -472,8 +472,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) blk_queue_logical_block_size(rq, 1 << 12); blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */ blk_queue_max_segments(rq, nr_max_blk); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); + blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); bdev->gendisk = alloc_disk(SCM_NR_PARTS); if (!bdev->gendisk) { diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 
2a6334ca750e..3df5d68d09f0 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -348,8 +348,8 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); + blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index a2b33a22c82a..d049e2d74484 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -23,7 +23,7 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE) obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ - sclp_early.o sclp_early_core.o + sclp_early.o sclp_early_core.o sclp_sd.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 98a5c459a1bf..60845d467a1b 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c @@ -9,7 +9,9 @@ #include <linux/kbd_kern.h> #include <linux/kbd_diacr.h> -u_short plain_map[NR_KEYS] = { +#include "keyboard.h" + +u_short ebc_plain_map[NR_KEYS] = { 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, @@ -85,12 +87,12 @@ static u_short shift_ctrl_map[NR_KEYS] = { 0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, }; -ushort *key_maps[MAX_NR_KEYMAPS] = { - plain_map, shift_map, NULL, NULL, +ushort *ebc_key_maps[MAX_NR_KEYMAPS] = { + ebc_plain_map, shift_map, NULL, NULL, ctrl_map, shift_ctrl_map, NULL, }; -unsigned int keymap_count = 4; +unsigned int ebc_keymap_count = 4; /* @@ -99,7 +101,7 @@ unsigned int keymap_count = 4; * the default and allocate dynamically in chunks of 512 bytes. 
*/ -char func_buf[] = { +char ebc_func_buf[] = { '\033', '[', '[', 'A', 0, '\033', '[', '[', 'B', 0, '\033', '[', '[', 'C', 0, @@ -123,37 +125,37 @@ char func_buf[] = { }; -char *funcbufptr = func_buf; -int funcbufsize = sizeof(func_buf); -int funcbufleft = 0; /* space left */ - -char *func_table[MAX_NR_FUNC] = { - func_buf + 0, - func_buf + 5, - func_buf + 10, - func_buf + 15, - func_buf + 20, - func_buf + 25, - func_buf + 31, - func_buf + 37, - func_buf + 43, - func_buf + 49, - func_buf + 55, - func_buf + 61, - func_buf + 67, - func_buf + 73, - func_buf + 79, - func_buf + 85, - func_buf + 91, - func_buf + 97, - func_buf + 103, - func_buf + 109, +char *ebc_funcbufptr = ebc_func_buf; +int ebc_funcbufsize = sizeof(ebc_func_buf); +int ebc_funcbufleft; /* space left */ + +char *ebc_func_table[MAX_NR_FUNC] = { + ebc_func_buf + 0, + ebc_func_buf + 5, + ebc_func_buf + 10, + ebc_func_buf + 15, + ebc_func_buf + 20, + ebc_func_buf + 25, + ebc_func_buf + 31, + ebc_func_buf + 37, + ebc_func_buf + 43, + ebc_func_buf + 49, + ebc_func_buf + 55, + ebc_func_buf + 61, + ebc_func_buf + 67, + ebc_func_buf + 73, + ebc_func_buf + 79, + ebc_func_buf + 85, + ebc_func_buf + 91, + ebc_func_buf + 97, + ebc_func_buf + 103, + ebc_func_buf + 109, NULL, }; -struct kbdiacruc accent_table[MAX_DIACR] = { +struct kbdiacruc ebc_accent_table[MAX_DIACR] = { {'^', 'c', 0003}, {'^', 'd', 0004}, {'^', 'z', 0032}, {'^', 0012, 0000}, }; -unsigned int accent_table_size = 4; +unsigned int ebc_accent_table_size = 4; diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 5b505fdaedec..db1fbf9b00b5 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -54,24 +54,24 @@ kbd_alloc(void) { kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); if (!kbd) goto out; - kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL); + kbd->key_maps = kzalloc(sizeof(ebc_key_maps), GFP_KERNEL); if (!kbd->key_maps) goto out_kbd; - for (i = 0; i < ARRAY_SIZE(key_maps); i++) { - if (key_maps[i]) { - kbd->key_maps[i] = kmemdup(key_maps[i], + for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) { + if (ebc_key_maps[i]) { + kbd->key_maps[i] = kmemdup(ebc_key_maps[i], sizeof(u_short) * NR_KEYS, GFP_KERNEL); if (!kbd->key_maps[i]) goto out_maps; } } - kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); + kbd->func_table = kzalloc(sizeof(ebc_func_table), GFP_KERNEL); if (!kbd->func_table) goto out_maps; - for (i = 0; i < ARRAY_SIZE(func_table); i++) { - if (func_table[i]) { - kbd->func_table[i] = kstrdup(func_table[i], + for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) { + if (ebc_func_table[i]) { + kbd->func_table[i] = kstrdup(ebc_func_table[i], GFP_KERNEL); if (!kbd->func_table[i]) goto out_func; @@ -81,22 +81,22 @@ kbd_alloc(void) { kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); if (!kbd->fn_handler) goto out_func; - kbd->accent_table = kmemdup(accent_table, + kbd->accent_table = kmemdup(ebc_accent_table, sizeof(struct kbdiacruc) * MAX_DIACR, GFP_KERNEL); if (!kbd->accent_table) goto out_fn_handler; - kbd->accent_table_size = accent_table_size; + kbd->accent_table_size = ebc_accent_table_size; return kbd; out_fn_handler: kfree(kbd->fn_handler); out_func: - for (i = 0; i < ARRAY_SIZE(func_table); i++) + for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) kfree(kbd->func_table[i]); kfree(kbd->func_table); out_maps: - for (i = 0; i < ARRAY_SIZE(key_maps); i++) + for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) kfree(kbd->key_maps[i]); kfree(kbd->key_maps); out_kbd: @@ -112,10 +112,10 @@ kbd_free(struct 
kbd_data *kbd) kfree(kbd->accent_table); kfree(kbd->fn_handler); - for (i = 0; i < ARRAY_SIZE(func_table); i++) + for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) kfree(kbd->func_table[i]); kfree(kbd->func_table); - for (i = 0; i < ARRAY_SIZE(key_maps); i++) + for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) kfree(kbd->key_maps[i]); kfree(kbd->key_maps); kfree(kbd); @@ -131,7 +131,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) int i, j, k; memset(ascebc, 0x40, 256); - for (i = 0; i < ARRAY_SIZE(key_maps); i++) { + for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) { keymap = kbd->key_maps[i]; if (!keymap) continue; @@ -158,7 +158,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) int i, j, k; memset(ebcasc, ' ', 256); - for (i = 0; i < ARRAY_SIZE(key_maps); i++) { + for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) { keymap = kbd->key_maps[i]; if (!keymap) continue; diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h index a074d9711628..c467589c7f45 100644 --- a/drivers/s390/char/keyboard.h +++ b/drivers/s390/char/keyboard.h @@ -14,6 +14,17 @@ struct kbd_data; +extern int ebc_funcbufsize, ebc_funcbufleft; +extern char *ebc_func_table[MAX_NR_FUNC]; +extern char ebc_func_buf[]; +extern char *ebc_funcbufptr; +extern unsigned int ebc_keymap_count; + +extern struct kbdiacruc ebc_accent_table[]; +extern unsigned int ebc_accent_table_size; +extern unsigned short *ebc_key_maps[MAX_NR_KEYMAPS]; +extern unsigned short ebc_plain_map[NR_KEYS]; + typedef void (fn_handler_fn)(struct kbd_data *); /* diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index e4e2df7a478e..e9aa71cdfc44 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -417,7 +417,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb) reg = NULL; list_for_each(l, &sclp_reg_list) { reg = list_entry(l, struct sclp_register, list); - if (reg->receive_mask & (1 << (32 - evbuf->type))) + if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type)) break; else reg = NULL; @@ -618,9 +618,12 @@ struct sclp_statechangebuf { u16 _zeros : 12; u16 mask_length; u64 sclp_active_facility_mask; - sccb_mask_t sclp_receive_mask; - sccb_mask_t sclp_send_mask; - u32 read_data_function_mask; + u8 masks[2 * 1021 + 4]; /* variable length */ + /* + * u8 sclp_receive_mask[mask_length]; + * u8 sclp_send_mask[mask_length]; + * u32 read_data_function_mask; + */ } __attribute__((packed)); @@ -631,14 +634,14 @@ sclp_state_change_cb(struct evbuf_header *evbuf) unsigned long flags; struct sclp_statechangebuf *scbuf; + BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE); + scbuf = (struct sclp_statechangebuf *) evbuf; - if (scbuf->mask_length != sizeof(sccb_mask_t)) - return; spin_lock_irqsave(&sclp_lock, flags); if (scbuf->validity_sclp_receive_mask) - sclp_receive_mask = scbuf->sclp_receive_mask; + sclp_receive_mask = sccb_get_recv_mask(scbuf); if (scbuf->validity_sclp_send_mask) - sclp_send_mask = scbuf->sclp_send_mask; + sclp_send_mask = sccb_get_send_mask(scbuf); spin_unlock_irqrestore(&sclp_lock, flags); if (scbuf->validity_sclp_active_facility_mask) sclp.facilities = scbuf->sclp_active_facility_mask; @@ -748,7 +751,7 @@ EXPORT_SYMBOL(sclp_remove_processed); /* Prepare init mask request. Called while sclp_lock is locked. 
*/ static inline void -__sclp_make_init_req(u32 receive_mask, u32 send_mask) +__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask) { struct init_sccb *sccb; @@ -761,12 +764,15 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask) sclp_init_req.callback = NULL; sclp_init_req.callback_data = NULL; sclp_init_req.sccb = sccb; - sccb->header.length = sizeof(struct init_sccb); - sccb->mask_length = sizeof(sccb_mask_t); - sccb->receive_mask = receive_mask; - sccb->send_mask = send_mask; - sccb->sclp_receive_mask = 0; - sccb->sclp_send_mask = 0; + sccb->header.length = sizeof(*sccb); + if (sclp_mask_compat_mode) + sccb->mask_length = SCLP_MASK_SIZE_COMPAT; + else + sccb->mask_length = sizeof(sccb_mask_t); + sccb_set_recv_mask(sccb, receive_mask); + sccb_set_send_mask(sccb, send_mask); + sccb_set_sclp_recv_mask(sccb, 0); + sccb_set_sclp_send_mask(sccb, 0); } /* Start init mask request. If calculate is non-zero, calculate the mask as @@ -822,8 +828,8 @@ sclp_init_mask(int calculate) sccb->header.response_code == 0x20) { /* Successful request */ if (calculate) { - sclp_receive_mask = sccb->sclp_receive_mask; - sclp_send_mask = sccb->sclp_send_mask; + sclp_receive_mask = sccb_get_sclp_recv_mask(sccb); + sclp_send_mask = sccb_get_sclp_send_mask(sccb); } else { sclp_receive_mask = 0; sclp_send_mask = 0; @@ -974,12 +980,18 @@ sclp_check_interface(void) irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); spin_lock_irqsave(&sclp_lock, flags); del_timer(&sclp_request_timer); - if (sclp_init_req.status == SCLP_REQ_DONE && - sccb->header.response_code == 0x20) { - rc = 0; - break; - } else - rc = -EBUSY; + rc = -EBUSY; + if (sclp_init_req.status == SCLP_REQ_DONE) { + if (sccb->header.response_code == 0x20) { + rc = 0; + break; + } else if (sccb->header.response_code == 0x74f0) { + if (!sclp_mask_compat_mode) { + sclp_mask_compat_mode = true; + retry = 0; + } + } + } } unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); spin_unlock_irqrestore(&sclp_lock, flags); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index f41f6e2ca063..1fe4918088e7 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -18,7 +18,7 @@ #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) #define SCLP_CONSOLE_PAGES 6 -#define SCLP_EVTYP_MASK(T) (1U << (32 - (T))) +#define SCLP_EVTYP_MASK(T) (1UL << (sizeof(sccb_mask_t) * BITS_PER_BYTE - (T))) #define EVTYP_OPCMD 0x01 #define EVTYP_MSG 0x02 @@ -28,6 +28,7 @@ #define EVTYP_PMSGCMD 0x09 #define EVTYP_ASYNC 0x0A #define EVTYP_CTLPROGIDENT 0x0B +#define EVTYP_STORE_DATA 0x0C #define EVTYP_ERRNOTIFY 0x18 #define EVTYP_VT220MSG 0x1A #define EVTYP_SDIAS 0x1C @@ -42,6 +43,7 @@ #define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD) #define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC) #define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT) +#define EVTYP_STORE_DATA_MASK SCLP_EVTYP_MASK(EVTYP_STORE_DATA) #define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY) #define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG) #define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS) @@ -85,7 +87,7 @@ enum sclp_pm_event { #define SCLP_PANIC_PRIO 1 #define SCLP_PANIC_PRIO_CLIENT 0 -typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! 
*/ +typedef u64 sccb_mask_t; struct sccb_header { u16 length; @@ -98,12 +100,53 @@ struct init_sccb { struct sccb_header header; u16 _reserved; u16 mask_length; - sccb_mask_t receive_mask; - sccb_mask_t send_mask; - sccb_mask_t sclp_receive_mask; - sccb_mask_t sclp_send_mask; + u8 masks[4 * 1021]; /* variable length */ + /* + * u8 receive_mask[mask_length]; + * u8 send_mask[mask_length]; + * u8 sclp_receive_mask[mask_length]; + * u8 sclp_send_mask[mask_length]; + */ } __attribute__((packed)); +#define SCLP_MASK_SIZE_COMPAT 4 + +static inline sccb_mask_t sccb_get_mask(u8 *masks, size_t len, int i) +{ + sccb_mask_t res = 0; + + memcpy(&res, masks + i * len, min(sizeof(res), len)); + return res; +} + +static inline void sccb_set_mask(u8 *masks, size_t len, int i, sccb_mask_t val) +{ + memset(masks + i * len, 0, len); + memcpy(masks + i * len, &val, min(sizeof(val), len)); +} + +#define sccb_get_generic_mask(sccb, i) \ +({ \ + __typeof__(sccb) __sccb = sccb; \ + \ + sccb_get_mask(__sccb->masks, __sccb->mask_length, i); \ +}) +#define sccb_get_recv_mask(sccb) sccb_get_generic_mask(sccb, 0) +#define sccb_get_send_mask(sccb) sccb_get_generic_mask(sccb, 1) +#define sccb_get_sclp_recv_mask(sccb) sccb_get_generic_mask(sccb, 2) +#define sccb_get_sclp_send_mask(sccb) sccb_get_generic_mask(sccb, 3) + +#define sccb_set_generic_mask(sccb, i, val) \ +({ \ + __typeof__(sccb) __sccb = sccb; \ + \ + sccb_set_mask(__sccb->masks, __sccb->mask_length, i, val); \ +}) +#define sccb_set_recv_mask(sccb, val) sccb_set_generic_mask(sccb, 0, val) +#define sccb_set_send_mask(sccb, val) sccb_set_generic_mask(sccb, 1, val) +#define sccb_set_sclp_recv_mask(sccb, val) sccb_set_generic_mask(sccb, 2, val) +#define sccb_set_sclp_send_mask(sccb, val) sccb_set_generic_mask(sccb, 3, val) + struct read_cpu_info_sccb { struct sccb_header header; u16 nr_configured; @@ -221,15 +264,17 @@ extern int sclp_init_state; extern int sclp_console_pages; extern int sclp_console_drop; extern unsigned long sclp_console_full; +extern bool sclp_mask_compat_mode; extern char sclp_early_sccb[PAGE_SIZE]; void sclp_early_wait_irq(void); int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb); unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb); +unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb); int sclp_early_set_event_mask(struct init_sccb *sccb, - unsigned long receive_mask, - unsigned long send_mask); + sccb_mask_t receive_mask, + sccb_mask_t send_mask); /* useful inlines */ diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 6b1891539c84..9a74abb9224d 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -249,7 +249,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb) if (sccb->header.response_code != 0x20) return; - if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + if (sclp_early_con_check_vt220(sccb)) sclp.has_vt220 = 1; if (sclp_early_con_check_linemode(sccb)) diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 17b0c67f3e8d..eceba3858cef 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -14,6 +14,11 @@ char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); int sclp_init_state __section(.data) = sclp_init_state_uninitialized; +/* + * Used to keep track of the size of the event masks. Qemu until version 2.11 + * only supports 4 and needs a workaround. 
+ */ +bool sclp_mask_compat_mode __section(.data); void sclp_early_wait_irq(void) { @@ -142,16 +147,24 @@ static void sclp_early_print_vt220(const char *str, unsigned int len) } int sclp_early_set_event_mask(struct init_sccb *sccb, - unsigned long receive_mask, - unsigned long send_mask) + sccb_mask_t receive_mask, + sccb_mask_t send_mask) { +retry: memset(sccb, 0, sizeof(*sccb)); sccb->header.length = sizeof(*sccb); - sccb->mask_length = sizeof(sccb_mask_t); - sccb->receive_mask = receive_mask; - sccb->send_mask = send_mask; + if (sclp_mask_compat_mode) + sccb->mask_length = SCLP_MASK_SIZE_COMPAT; + else + sccb->mask_length = sizeof(sccb_mask_t); + sccb_set_recv_mask(sccb, receive_mask); + sccb_set_send_mask(sccb, send_mask); if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb)) return -EIO; + if ((sccb->header.response_code == 0x74f0) && !sclp_mask_compat_mode) { + sclp_mask_compat_mode = true; + goto retry; + } if (sccb->header.response_code != 0x20) return -EIO; return 0; @@ -159,19 +172,28 @@ int sclp_early_set_event_mask(struct init_sccb *sccb, unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb) { - if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK)) + if (!(sccb_get_sclp_send_mask(sccb) & EVTYP_OPCMD_MASK)) return 0; - if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + if (!(sccb_get_sclp_recv_mask(sccb) & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) return 0; return 1; } +unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb) +{ + if (sccb_get_sclp_send_mask(sccb) & EVTYP_VT220MSG_MASK) + return 1; + return 0; +} + static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) { unsigned long receive_mask, send_mask; struct init_sccb *sccb; int rc; + BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE); + *have_linemode = *have_vt220 = 0; sccb = (struct init_sccb *) &sclp_early_sccb; receive_mask = disable ? 0 : EVTYP_OPCMD_MASK; @@ -180,7 +202,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) if (rc) return rc; *have_linemode = sclp_early_con_check_linemode(sccb); - *have_vt220 = sccb->send_mask & EVTYP_VT220MSG_MASK; + *have_vt220 = !!(sccb_get_send_mask(sccb) & EVTYP_VT220MSG_MASK); return rc; } diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c new file mode 100644 index 000000000000..99f41db5123b --- /dev/null +++ b/drivers/s390/char/sclp_sd.c @@ -0,0 +1,569 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCLP Store Data support and sysfs interface + * + * Copyright IBM Corp. 
2017 + */ + +#define KMSG_COMPONENT "sclp_sd" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/completion.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/async.h> +#include <linux/export.h> +#include <linux/mutex.h> + +#include <asm/pgalloc.h> + +#include "sclp.h" + +#define SD_EQ_STORE_DATA 0 +#define SD_EQ_HALT 1 +#define SD_EQ_SIZE 2 + +#define SD_DI_CONFIG 3 + +struct sclp_sd_evbuf { + struct evbuf_header hdr; + u8 eq; + u8 di; + u8 rflags; + u64 :56; + u32 id; + u16 :16; + u8 fmt; + u8 status; + u64 sat; + u64 sa; + u32 esize; + u32 dsize; +} __packed; + +struct sclp_sd_sccb { + struct sccb_header hdr; + struct sclp_sd_evbuf evbuf; +} __packed __aligned(PAGE_SIZE); + +/** + * struct sclp_sd_data - Result of a Store Data request + * @esize_bytes: Resulting esize in bytes + * @dsize_bytes: Resulting dsize in bytes + * @data: Pointer to data - must be released using vfree() + */ +struct sclp_sd_data { + size_t esize_bytes; + size_t dsize_bytes; + void *data; +}; + +/** + * struct sclp_sd_listener - Listener for asynchronous Store Data response + * @list: For enqueueing this struct + * @id: Event ID of response to listen for + * @completion: Can be used to wait for response + * @evbuf: Contains the resulting Store Data response after completion + */ +struct sclp_sd_listener { + struct list_head list; + u32 id; + struct completion completion; + struct sclp_sd_evbuf evbuf; +}; + +/** + * struct sclp_sd_file - Sysfs representation of a Store Data entity + * @kobj: Kobject + * @data_attr: Attribute for accessing data contents + * @data_mutex: Mutex to serialize access and updates to @data + * @data: Data associated with this entity + * @di: DI value associated with this entity + */ +struct sclp_sd_file { + struct kobject kobj; + struct bin_attribute data_attr; + struct mutex data_mutex; + struct sclp_sd_data data; + u8 di; +}; +#define to_sd_file(x) container_of(x, struct sclp_sd_file, kobj) + +static struct kset *sclp_sd_kset; +static struct sclp_sd_file *config_file; + +static LIST_HEAD(sclp_sd_queue); +static DEFINE_SPINLOCK(sclp_sd_queue_lock); + +/** + * sclp_sd_listener_add() - Add listener for Store Data responses + * @listener: Listener to add + */ +static void sclp_sd_listener_add(struct sclp_sd_listener *listener) +{ + spin_lock_irq(&sclp_sd_queue_lock); + list_add_tail(&listener->list, &sclp_sd_queue); + spin_unlock_irq(&sclp_sd_queue_lock); +} + +/** + * sclp_sd_listener_remove() - Remove listener for Store Data responses + * @listener: Listener to remove + */ +static void sclp_sd_listener_remove(struct sclp_sd_listener *listener) +{ + spin_lock_irq(&sclp_sd_queue_lock); + list_del(&listener->list); + spin_unlock_irq(&sclp_sd_queue_lock); +} + +/** + * sclp_sd_listener_init() - Initialize a Store Data response listener + * @id: Event ID to listen for + * + * Initialize a listener for asynchronous Store Data responses. This listener + * can afterwards be used to wait for a specific response and to retrieve + * the associated response data. + */ +static void sclp_sd_listener_init(struct sclp_sd_listener *listener, u32 id) +{ + memset(listener, 0, sizeof(*listener)); + listener->id = id; + init_completion(&listener->completion); +} + +/** + * sclp_sd_receiver() - Receiver for Store Data events + * @evbuf_hdr: Header of received events + * + * Process Store Data events and complete listeners with matching event IDs. 
+ */ +static void sclp_sd_receiver(struct evbuf_header *evbuf_hdr) +{ + struct sclp_sd_evbuf *evbuf = (struct sclp_sd_evbuf *) evbuf_hdr; + struct sclp_sd_listener *listener; + int found = 0; + + pr_debug("received event (id=0x%08x)\n", evbuf->id); + spin_lock(&sclp_sd_queue_lock); + list_for_each_entry(listener, &sclp_sd_queue, list) { + if (listener->id != evbuf->id) + continue; + + listener->evbuf = *evbuf; + complete(&listener->completion); + found = 1; + break; + } + spin_unlock(&sclp_sd_queue_lock); + + if (!found) + pr_debug("unsolicited event (id=0x%08x)\n", evbuf->id); +} + +static struct sclp_register sclp_sd_register = { + .send_mask = EVTYP_STORE_DATA_MASK, + .receive_mask = EVTYP_STORE_DATA_MASK, + .receiver_fn = sclp_sd_receiver, +}; + +/** + * sclp_sd_sync() - Perform Store Data request synchronously + * @page: Address of work page - must be below 2GB + * @eq: Input EQ value + * @di: Input DI value + * @sat: Input SAT value + * @sa: Input SA value used to specify the address of the target buffer + * @dsize_ptr: Optional pointer to input and output DSIZE value + * @esize_ptr: Optional pointer to output ESIZE value + * + * Perform Store Data request with specified parameters and wait for completion. + * + * Return %0 on success and store resulting DSIZE and ESIZE values in + * @dsize_ptr and @esize_ptr (if provided). Return non-zero on error. + */ +static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa, + u32 *dsize_ptr, u32 *esize_ptr) +{ + struct sclp_sd_sccb *sccb = (void *) page; + struct sclp_sd_listener listener; + struct sclp_sd_evbuf *evbuf; + int rc; + + sclp_sd_listener_init(&listener, (u32) (addr_t) sccb); + sclp_sd_listener_add(&listener); + + /* Prepare SCCB */ + memset(sccb, 0, PAGE_SIZE); + sccb->hdr.length = sizeof(sccb->hdr) + sizeof(sccb->evbuf); + evbuf = &sccb->evbuf; + evbuf->hdr.length = sizeof(*evbuf); + evbuf->hdr.type = EVTYP_STORE_DATA; + evbuf->eq = eq; + evbuf->di = di; + evbuf->id = listener.id; + evbuf->fmt = 1; + evbuf->sat = sat; + evbuf->sa = sa; + if (dsize_ptr) + evbuf->dsize = *dsize_ptr; + + /* Perform command */ + pr_debug("request (eq=%d, di=%d, id=0x%08x)\n", eq, di, listener.id); + rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb); + pr_debug("request done (rc=%d)\n", rc); + if (rc) + goto out; + + /* Evaluate response */ + if (sccb->hdr.response_code == 0x73f0) { + pr_debug("event not supported\n"); + rc = -EIO; + goto out_remove; + } + if (sccb->hdr.response_code != 0x0020 || !(evbuf->hdr.flags & 0x80)) { + rc = -EIO; + goto out; + } + if (!(evbuf->rflags & 0x80)) { + rc = wait_for_completion_interruptible(&listener.completion); + if (rc) + goto out; + evbuf = &listener.evbuf; + } + switch (evbuf->status) { + case 0: + if (dsize_ptr) + *dsize_ptr = evbuf->dsize; + if (esize_ptr) + *esize_ptr = evbuf->esize; + pr_debug("success (dsize=%u, esize=%u)\n", evbuf->dsize, + evbuf->esize); + break; + case 3: + rc = -ENOENT; + break; + default: + rc = -EIO; + break; + + } + +out: + if (rc && rc != -ENOENT) { + /* Provide some information about what went wrong */ + pr_warn("Store Data request failed (eq=%d, di=%d, " + "response=0x%04x, flags=0x%02x, status=%d, rc=%d)\n", + eq, di, sccb->hdr.response_code, evbuf->hdr.flags, + evbuf->status, rc); + } + +out_remove: + sclp_sd_listener_remove(&listener); + + return rc; +} + +/** + * sclp_sd_store_data() - Obtain data for specified Store Data entity + * @result: Resulting data + * @di: DI value associated with this entity + * + * Perform a series of Store Data requests to 
obtain the size and contents of + * the specified Store Data entity. + * + * Return: + * %0: Success - result is stored in @result. @result->data must be + * released using vfree() after use. + * %-ENOENT: No data available for this entity + * %<0: Other error + */ +static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di) +{ + u32 dsize = 0, esize = 0; + unsigned long page, asce = 0; + void *data = NULL; + int rc; + + page = __get_free_page(GFP_KERNEL | GFP_DMA); + if (!page) + return -ENOMEM; + + /* Get size */ + rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize); + if (rc) + goto out; + if (dsize == 0) + goto out_result; + + /* Allocate memory */ + data = vzalloc((size_t) dsize * PAGE_SIZE); + if (!data) { + rc = -ENOMEM; + goto out; + } + + /* Get translation table for buffer */ + asce = base_asce_alloc((unsigned long) data, dsize); + if (!asce) { + vfree(data); + rc = -ENOMEM; + goto out; + } + + /* Get data */ + rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize, + &esize); + if (rc) { + /* Cancel running request if interrupted */ + if (rc == -ERESTARTSYS) + sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL); + vfree(data); + goto out; + } + +out_result: + result->esize_bytes = (size_t) esize * PAGE_SIZE; + result->dsize_bytes = (size_t) dsize * PAGE_SIZE; + result->data = data; + +out: + base_asce_free(asce); + free_page(page); + + return rc; +} + +/** + * sclp_sd_data_reset() - Reset Store Data result buffer + * @data: Data buffer to reset + * + * Reset @data to initial state and release associated memory. + */ +static void sclp_sd_data_reset(struct sclp_sd_data *data) +{ + vfree(data->data); + data->data = NULL; + data->dsize_bytes = 0; + data->esize_bytes = 0; +} + +/** + * sclp_sd_file_release() - Release function for sclp_sd_file object + * @kobj: Kobject embedded in sclp_sd_file object + */ +static void sclp_sd_file_release(struct kobject *kobj) +{ + struct sclp_sd_file *sd_file = to_sd_file(kobj); + + sclp_sd_data_reset(&sd_file->data); + kfree(sd_file); +} + +/** + * sclp_sd_file_update() - Update contents of sclp_sd_file object + * @sd_file: Object to update + * + * Obtain the current version of data associated with the Store Data entity + * @sd_file. + * + * On success, return %0 and generate a KOBJ_CHANGE event to indicate that the + * data may have changed. Return non-zero otherwise. + */ +static int sclp_sd_file_update(struct sclp_sd_file *sd_file) +{ + const char *name = kobject_name(&sd_file->kobj); + struct sclp_sd_data data; + int rc; + + rc = sclp_sd_store_data(&data, sd_file->di); + if (rc) { + if (rc == -ENOENT) { + pr_info("No data is available for the %s data entity\n", + name); + } + return rc; + } + + mutex_lock(&sd_file->data_mutex); + sclp_sd_data_reset(&sd_file->data); + sd_file->data = data; + mutex_unlock(&sd_file->data_mutex); + + pr_info("A %zu-byte %s data entity was retrieved\n", data.dsize_bytes, + name); + kobject_uevent(&sd_file->kobj, KOBJ_CHANGE); + + return 0; +} + +/** + * sclp_sd_file_update_async() - Wrapper for asynchronous update call + * @data: Object to update + */ +static void sclp_sd_file_update_async(void *data, async_cookie_t cookie) +{ + struct sclp_sd_file *sd_file = data; + + sclp_sd_file_update(sd_file); +} + +/** + * reload_store() - Store function for "reload" sysfs attribute + * @kobj: Kobject of sclp_sd_file object + * + * Initiate a reload of the data associated with an sclp_sd_file object. 
+ */ +static ssize_t reload_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct sclp_sd_file *sd_file = to_sd_file(kobj); + + sclp_sd_file_update(sd_file); + + return count; +} + +static struct kobj_attribute reload_attr = __ATTR_WO(reload); + +static struct attribute *sclp_sd_file_default_attrs[] = { + &reload_attr.attr, + NULL, +}; + +static struct kobj_type sclp_sd_file_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = sclp_sd_file_release, + .default_attrs = sclp_sd_file_default_attrs, +}; + +/** + * data_read() - Read function for "read" sysfs attribute + * @kobj: Kobject of sclp_sd_file object + * @buffer: Target buffer + * @off: Requested file offset + * @size: Requested number of bytes + * + * Store the requested portion of the Store Data entity contents into the + * specified buffer. Return the number of bytes stored on success, or %0 + * on EOF. + */ +static ssize_t data_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buffer, + loff_t off, size_t size) +{ + struct sclp_sd_file *sd_file = to_sd_file(kobj); + size_t data_size; + char *data; + + mutex_lock(&sd_file->data_mutex); + + data = sd_file->data.data; + data_size = sd_file->data.dsize_bytes; + if (!data || off >= data_size) { + size = 0; + } else { + if (off + size > data_size) + size = data_size - off; + memcpy(buffer, data + off, size); + } + + mutex_unlock(&sd_file->data_mutex); + + return size; +} + +/** + * sclp_sd_file_create() - Add a sysfs file representing a Store Data entity + * @name: Name of file + * @di: DI value associated with this entity + * + * Create a sysfs directory with the given @name located under + * + * /sys/firmware/sclp_sd/ + * + * The files in this directory can be used to access the contents of the Store + * Data entity associated with @DI. + * + * Return pointer to resulting sclp_sd_file object on success, %NULL otherwise. + * The object must be freed by calling kobject_put() on the embedded kobject + * pointer after use. + */ +static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di) +{ + struct sclp_sd_file *sd_file; + int rc; + + sd_file = kzalloc(sizeof(*sd_file), GFP_KERNEL); + if (!sd_file) + return NULL; + sd_file->di = di; + mutex_init(&sd_file->data_mutex); + + /* Create kobject located under /sys/firmware/sclp_sd/ */ + sd_file->kobj.kset = sclp_sd_kset; + rc = kobject_init_and_add(&sd_file->kobj, &sclp_sd_file_ktype, NULL, + "%s", name); + if (rc) { + kobject_put(&sd_file->kobj); + return NULL; + } + + sysfs_bin_attr_init(&sd_file->data_attr); + sd_file->data_attr.attr.name = "data"; + sd_file->data_attr.attr.mode = 0444; + sd_file->data_attr.read = data_read; + + rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr); + if (rc) { + kobject_put(&sd_file->kobj); + return NULL; + } + + /* + * For completeness only - users interested in entity data should listen + * for KOBJ_CHANGE instead. 
+ */ + kobject_uevent(&sd_file->kobj, KOBJ_ADD); + + /* Don't let a slow Store Data request delay further initialization */ + async_schedule(sclp_sd_file_update_async, sd_file); + + return sd_file; +} + +/** + * sclp_sd_init() - Initialize sclp_sd support and register sysfs files + */ +static __init int sclp_sd_init(void) +{ + int rc; + + rc = sclp_register(&sclp_sd_register); + if (rc) + return rc; + + /* Create kset named "sclp_sd" located under /sys/firmware/ */ + rc = -ENOMEM; + sclp_sd_kset = kset_create_and_add("sclp_sd", NULL, firmware_kobj); + if (!sclp_sd_kset) + goto err_kset; + + rc = -EINVAL; + config_file = sclp_sd_file_create("config", SD_DI_CONFIG); + if (!config_file) + goto err_config; + + return 0; + +err_config: + kset_unregister(sclp_sd_kset); +err_kset: + sclp_unregister(&sclp_sd_register); + + return rc; +} +device_initcall(sclp_sd_init); diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 9f7b87d6d434..5aff8b684eb2 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -502,7 +502,10 @@ sclp_tty_init(void) int i; int rc; - if (!CONSOLE_IS_SCLP) + /* z/VM multiplexes the line mode output on the 32xx screen */ + if (MACHINE_IS_VM && !CONSOLE_IS_SCLP) + return 0; + if (!sclp.has_linemode) return 0; driver = alloc_tty_driver(1); if (!driver) diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index bfec1485ca23..5535312602af 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -323,6 +323,9 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, struct ccw_dev_id dev_id; int rc, i; + if (num_devices < 1) + return -EINVAL; + gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), GFP_KERNEL); if (!gdev) @@ -375,7 +378,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, goto error; } /* Check if the devices are bound to the required ccw driver. 
*/ - if (gdev->count && gdrv && gdrv->ccw_driver && + if (gdrv && gdrv->ccw_driver && gdev->cdev[0]->drv != gdrv->ccw_driver) { rc = -EINVAL; goto error; diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index f95b452b8bbc..afbdee74147d 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -384,6 +384,28 @@ static ssize_t chp_chid_external_show(struct device *dev, } static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); +static ssize_t util_string_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct channel_path *chp = to_channelpath(kobj_to_dev(kobj)); + ssize_t rc; + + mutex_lock(&chp->lock); + rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str, + sizeof(chp->desc_fmt3.util_str)); + mutex_unlock(&chp->lock); + + return rc; +} +static BIN_ATTR_RO(util_string, + sizeof(((struct channel_path_desc_fmt3 *)0)->util_str)); + +static struct bin_attribute *chp_bin_attrs[] = { + &bin_attr_util_string, + NULL, +}; + static struct attribute *chp_attrs[] = { &dev_attr_status.attr, &dev_attr_configure.attr, @@ -396,6 +418,7 @@ static struct attribute *chp_attrs[] = { }; static struct attribute_group chp_attr_group = { .attrs = chp_attrs, + .bin_attrs = chp_bin_attrs, }; static const struct attribute_group *chp_attr_groups[] = { &chp_attr_group, @@ -422,7 +445,7 @@ int chp_update_desc(struct channel_path *chp) { int rc; - rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc); + rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc); if (rc) return rc; @@ -431,6 +454,7 @@ int chp_update_desc(struct channel_path *chp) * hypervisors implement the required chsc commands. */ chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); + chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3); chsc_get_channel_measurement_chars(chp); return 0; @@ -506,20 +530,20 @@ out: * On success return a newly allocated copy of the channel-path description * data associated with the given channel-path ID. Return %NULL on error. */ -struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid) +struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid) { struct channel_path *chp; - struct channel_path_desc *desc; + struct channel_path_desc_fmt0 *desc; chp = chpid_to_chp(chpid); if (!chp) return NULL; - desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); + desc = kmalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return NULL; mutex_lock(&chp->lock); - memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); + memcpy(desc, &chp->desc, sizeof(*desc)); mutex_unlock(&chp->lock); return desc; } diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 7e80323cd261..20259f3fbf45 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -44,8 +44,9 @@ struct channel_path { struct chp_id chpid; struct mutex lock; /* Serialize access to below members. 
*/ int state; - struct channel_path_desc desc; + struct channel_path_desc_fmt0 desc; struct channel_path_desc_fmt1 desc_fmt1; + struct channel_path_desc_fmt3 desc_fmt3; /* Channel-measurement related stuff: */ int cmg; int shared; @@ -61,7 +62,7 @@ static inline struct channel_path *chpid_to_chp(struct chp_id chpid) int chp_get_status(struct chp_id chpid); u8 chp_get_sch_opm(struct subchannel *sch); int chp_is_registered(struct chp_id chpid); -struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid); +struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid); void chp_remove_cmg_attr(struct channel_path *chp); int chp_add_cmg_attr(struct channel_path *chp); int chp_update_desc(struct channel_path *chp); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index c08fc5a8df0c..6652a49a49b1 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -915,6 +915,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, return -EINVAL; if ((rfmt == 2) && !css_general_characteristics.cib) return -EINVAL; + if ((rfmt == 3) && !css_general_characteristics.util_str) + return -EINVAL; memset(page, 0, PAGE_SIZE); scpd_area = page; @@ -940,43 +942,30 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, } EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); -int chsc_determine_base_channel_path_desc(struct chp_id chpid, - struct channel_path_desc *desc) -{ - struct chsc_scpd *scpd_area; - unsigned long flags; - int ret; - - spin_lock_irqsave(&chsc_page_lock, flags); - scpd_area = chsc_page; - ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area); - if (ret) - goto out; - - memcpy(desc, scpd_area->data, sizeof(*desc)); -out: - spin_unlock_irqrestore(&chsc_page_lock, flags); - return ret; +#define chsc_det_chp_desc(FMT, c) \ +int chsc_determine_fmt##FMT##_channel_path_desc( \ + struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc) \ +{ \ + struct chsc_scpd *scpd_area; \ + unsigned long flags; \ + int ret; \ + \ + spin_lock_irqsave(&chsc_page_lock, flags); \ + scpd_area = chsc_page; \ + ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \ + scpd_area); \ + if (ret) \ + goto out; \ + \ + memcpy(desc, scpd_area->data, sizeof(*desc)); \ +out: \ + spin_unlock_irqrestore(&chsc_page_lock, flags); \ + return ret; \ } -int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, - struct channel_path_desc_fmt1 *desc) -{ - struct chsc_scpd *scpd_area; - unsigned long flags; - int ret; - - spin_lock_irqsave(&chsc_page_lock, flags); - scpd_area = chsc_page; - ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area); - if (ret) - goto out; - - memcpy(desc, scpd_area->data, sizeof(*desc)); -out: - spin_unlock_irqrestore(&chsc_page_lock, flags); - return ret; -} +chsc_det_chp_desc(0, 0) +chsc_det_chp_desc(1, 1) +chsc_det_chp_desc(3, 0) static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index dda5953534b7..5c9f0dd33f4e 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -40,6 +40,11 @@ struct channel_path_desc_fmt1 { u32 zeros[2]; } __attribute__ ((packed)); +struct channel_path_desc_fmt3 { + struct channel_path_desc_fmt1 fmt1_desc; + u8 util_str[64]; +}; + struct channel_path; struct css_chsc_char { @@ -147,10 +152,12 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable); int chsc_chp_vary(struct chp_id chpid, int on); int chsc_determine_channel_path_desc(struct chp_id 
chpid, int fmt, int rfmt, int c, int m, void *page); -int chsc_determine_base_channel_path_desc(struct chp_id chpid, - struct channel_path_desc *desc); +int chsc_determine_fmt0_channel_path_desc(struct chp_id chpid, + struct channel_path_desc_fmt0 *desc); int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, struct channel_path_desc_fmt1 *desc); +int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid, + struct channel_path_desc_fmt3 *desc); void chsc_chp_online(struct chp_id chpid); void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 6886b3d34cf8..5130d7c67239 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -25,7 +25,6 @@ #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/setup.h> -#include <asm/reset.h> #include <asm/ipl.h> #include <asm/chpid.h> #include <asm/airq.h> @@ -767,262 +766,6 @@ void cio_register_early_subchannels(void) } #endif /* CONFIG_CCW_CONSOLE */ -static int -__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) -{ - int retry, cc; - - cc = 0; - for (retry=0;retry<3;retry++) { - schib->pmcw.ena = 0; - cc = msch(schid, schib); - if (cc) - return (cc==3?-ENODEV:-EBUSY); - if (stsch(schid, schib) || !css_sch_is_valid(schib)) - return -ENODEV; - if (!schib->pmcw.ena) - return 0; - } - return -EBUSY; /* uhm... */ -} - -static int -__clear_io_subchannel_easy(struct subchannel_id schid) -{ - int retry; - - if (csch(schid)) - return -ENODEV; - for (retry=0;retry<20;retry++) { - struct tpi_info ti; - - if (tpi(&ti)) { - tsch(ti.schid, this_cpu_ptr(&cio_irb)); - if (schid_equal(&ti.schid, &schid)) - return 0; - } - udelay_simple(100); - } - return -EBUSY; -} - -static void __clear_chsc_subchannel_easy(void) -{ - /* It seems we can only wait for a bit here :/ */ - udelay_simple(100); -} - -static int pgm_check_occured; - -static void cio_reset_pgm_check_handler(void) -{ - pgm_check_occured = 1; -} - -static int stsch_reset(struct subchannel_id schid, struct schib *addr) -{ - int rc; - - pgm_check_occured = 0; - s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; - rc = stsch(schid, addr); - s390_base_pgm_handler_fn = NULL; - - /* The program check handler could have changed pgm_check_occured. */ - barrier(); - - if (pgm_check_occured) - return -EIO; - else - return rc; -} - -static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) -{ - struct schib schib; - - if (stsch_reset(schid, &schib)) - return -ENXIO; - if (!schib.pmcw.ena) - return 0; - switch(__disable_subchannel_easy(schid, &schib)) { - case 0: - case -ENODEV: - break; - default: /* -EBUSY */ - switch (schib.pmcw.st) { - case SUBCHANNEL_TYPE_IO: - if (__clear_io_subchannel_easy(schid)) - goto out; /* give up... */ - break; - case SUBCHANNEL_TYPE_CHSC: - __clear_chsc_subchannel_easy(); - break; - default: - /* No default clear strategy */ - break; - } - stsch(schid, &schib); - __disable_subchannel_easy(schid, &schib); - } -out: - return 0; -} - -static atomic_t chpid_reset_count; - -static void s390_reset_chpids_mcck_handler(void) -{ - struct crw crw; - union mci mci; - - /* Check for pending channel report word. */ - mci.val = S390_lowcore.mcck_interruption_code; - if (!mci.cp) - return; - /* Process channel report words. */ - while (stcrw(&crw) == 0) { - /* Check for responses to RCHP. 
*/ - if (crw.slct && crw.rsc == CRW_RSC_CPATH) - atomic_dec(&chpid_reset_count); - } -} - -#define RCHP_TIMEOUT (30 * USEC_PER_SEC) -static void css_reset(void) -{ - int i, ret; - unsigned long long timeout; - struct chp_id chpid; - - /* Reset subchannels. */ - for_each_subchannel(__shutdown_subchannel_easy, NULL); - /* Reset channel paths. */ - s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; - /* Enable channel report machine checks. */ - __ctl_set_bit(14, 28); - /* Temporarily reenable machine checks. */ - local_mcck_enable(); - chp_id_init(&chpid); - for (i = 0; i <= __MAX_CHPID; i++) { - chpid.id = i; - ret = rchp(chpid); - if ((ret == 0) || (ret == 2)) - /* - * rchp either succeeded, or another rchp is already - * in progress. In either case, we'll get a crw. - */ - atomic_inc(&chpid_reset_count); - } - /* Wait for machine check for all channel paths. */ - timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12); - while (atomic_read(&chpid_reset_count) != 0) { - if (get_tod_clock_fast() > timeout) - break; - cpu_relax(); - } - /* Disable machine checks again. */ - local_mcck_disable(); - /* Disable channel report machine checks. */ - __ctl_clear_bit(14, 28); - s390_base_mcck_handler_fn = NULL; -} - -static struct reset_call css_reset_call = { - .fn = css_reset, -}; - -static int __init init_css_reset_call(void) -{ - atomic_set(&chpid_reset_count, 0); - register_reset_call(&css_reset_call); - return 0; -} - -arch_initcall(init_css_reset_call); - -struct sch_match_id { - struct subchannel_id schid; - struct ccw_dev_id devid; - int rc; -}; - -static int __reipl_subchannel_match(struct subchannel_id schid, void *data) -{ - struct schib schib; - struct sch_match_id *match_id = data; - - if (stsch_reset(schid, &schib)) - return -ENXIO; - if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && - (schib.pmcw.dev == match_id->devid.devno) && - (schid.ssid == match_id->devid.ssid)) { - match_id->schid = schid; - match_id->rc = 0; - return 1; - } - return 0; -} - -static int reipl_find_schid(struct ccw_dev_id *devid, - struct subchannel_id *schid) -{ - struct sch_match_id match_id; - - match_id.devid = *devid; - match_id.rc = -ENODEV; - for_each_subchannel(__reipl_subchannel_match, &match_id); - if (match_id.rc == 0) - *schid = match_id.schid; - return match_id.rc; -} - -extern void do_reipl_asm(__u32 schid); - -/* Make sure all subchannels are quiet before we re-ipl an lpar. */ -void reipl_ccw_dev(struct ccw_dev_id *devid) -{ - struct subchannel_id uninitialized_var(schid); - - s390_reset_system(); - if (reipl_find_schid(devid, &schid) != 0) - panic("IPL Device not found\n"); - do_reipl_asm(*((__u32*)&schid)); -} - -int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) -{ - static struct chsc_sda_area sda_area __initdata; - struct subchannel_id schid; - struct schib schib; - - schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; - if (!schid.one) - return -ENODEV; - - if (schid.ssid) { - /* - * Firmware should have already enabled MSS but whoever started - * the kernel might have initiated a channel subsystem reset. - * Ensure that MSS is enabled. 
- */ - memset(&sda_area, 0, sizeof(sda_area)); - if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS)) - return -ENODEV; - } - if (stsch(schid, &schib)) - return -ENODEV; - if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) - return -ENODEV; - if (!schib.pmcw.dnv) - return -ENODEV; - - iplinfo->ssid = schid.ssid; - iplinfo->devno = schib.pmcw.dev; - iplinfo->is_qdio = schib.pmcw.qf; - return 0; -} - /** * cio_tm_start_key - perform start function * @sch: subchannel on which to perform the start function diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index f50ea035aa9b..1540229a37bb 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1073,8 +1073,7 @@ out_schedule: return 0; } -static int -io_subchannel_remove (struct subchannel *sch) +static int io_subchannel_remove(struct subchannel *sch) { struct io_subchannel_private *io_priv = to_io_private(sch); struct ccw_device *cdev; @@ -1082,14 +1081,12 @@ io_subchannel_remove (struct subchannel *sch) cdev = sch_get_cdev(sch); if (!cdev) goto out_free; - io_subchannel_quiesce(sch); - /* Set ccw device to not operational and drop reference. */ - spin_lock_irq(cdev->ccwlock); + + ccw_device_unregister(cdev); + spin_lock_irq(sch->lock); sch_set_cdev(sch, NULL); set_io_private(sch, NULL); - cdev->private->state = DEV_STATE_NOT_OPER; - spin_unlock_irq(cdev->ccwlock); - ccw_device_unregister(cdev); + spin_unlock_irq(sch->lock); out_free: kfree(io_priv); sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); @@ -1721,6 +1718,7 @@ static int ccw_device_remove(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = cdev->drv; + struct subchannel *sch; int ret; if (cdrv->remove) @@ -1746,7 +1744,9 @@ static int ccw_device_remove(struct device *dev) ccw_device_set_timeout(cdev, 0); cdev->drv = NULL; cdev->private->int_class = IRQIO_CIO; + sch = to_subchannel(cdev->dev.parent); spin_unlock_irq(cdev->ccwlock); + io_subchannel_quiesce(sch); __disable_cmf(cdev); return 0; diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 75ce12a24dc2..aecfebb74157 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -460,8 +460,8 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev) * On success return a newly allocated copy of the channel-path description * data associated with the given channel path. Return %NULL on error. 
*/ -struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev, - int chp_idx) +struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev, + int chp_idx) { struct subchannel *sch; struct chp_id chpid; diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c index 4fa9ee1d09fa..14d328338ce2 100644 --- a/drivers/s390/cio/ioasm.c +++ b/drivers/s390/cio/ioasm.c @@ -183,30 +183,6 @@ int chsc(void *chsc_area) } EXPORT_SYMBOL(chsc); -static inline int __rchp(struct chp_id chpid) -{ - register struct chp_id reg1 asm ("1") = chpid; - int ccode; - - asm volatile( - " lr 1,%1\n" - " rchp\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) : "d" (reg1) : "cc"); - return ccode; -} - -int rchp(struct chp_id chpid) -{ - int ccode; - - ccode = __rchp(chpid); - trace_s390_cio_rchp(chpid, ccode); - - return ccode; -} - static inline int __rsch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 35ad4ddd61e0..4be539cb9adc 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -20,7 +20,6 @@ int ssch(struct subchannel_id schid, union orb *addr); int csch(struct subchannel_id schid); int tpi(struct tpi_info *addr); int chsc(void *chsc_area); -int rchp(struct chp_id chpid); int rsch(struct subchannel_id schid); int hsch(struct subchannel_id schid); int xsch(struct subchannel_id schid); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index d5b02de02a3a..f4ca72dd862f 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -98,22 +98,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, return cc; } -static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) -{ - /* all done or next buffer state different */ - if (ccq == 0 || ccq == 32) - return 0; - /* no buffer processed */ - if (ccq == 97) - return 1; - /* not all buffers processed */ - if (ccq == 96) - return 2; - /* notify devices immediately */ - DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); - return -EIO; -} - /** * qdio_do_eqbs - extract buffer states for QEBSM * @q: queue to manipulate @@ -128,7 +112,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, int start, int count, int auto_ack) { - int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; + int tmp_count = count, tmp_start = start, nr = q->nr; unsigned int ccq = 0; qperf_inc(q, eqbs); @@ -138,34 +122,30 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, again: ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, auto_ack); - rc = qdio_check_ccq(q, ccq); - if (!rc) - return count - tmp_count; - if (rc == 1) { - DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); - goto again; - } - - if (rc == 2) { + switch (ccq) { + case 0: + case 32: + /* all done, or next buffer state different */ + return count - tmp_count; + case 96: + /* not all buffers processed */ qperf_inc(q, eqbs_partial); DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", tmp_count); - /* - * Retry once, if that fails bail out and process the - * extracted buffers before trying again. 
- */ - if (!retried++) - goto again; - else - return count - tmp_count; + return count - tmp_count; + case 97: + /* no buffer processed */ + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); + goto again; + default: + DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); + DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, + q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; } - - DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, - q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); - return 0; } /** @@ -185,7 +165,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, unsigned int ccq = 0; int tmp_count = count, tmp_start = start; int nr = q->nr; - int rc; if (!count) return 0; @@ -195,26 +174,32 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, nr += q->irq_ptr->nr_input_qs; again: ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); - rc = qdio_check_ccq(q, ccq); - if (!rc) { + + switch (ccq) { + case 0: + case 32: + /* all done, or active buffer adapter-owned */ WARN_ON_ONCE(tmp_count); return count - tmp_count; - } - - if (rc == 1 || rc == 2) { + case 96: + /* not all buffers processed */ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); qperf_inc(q, sqbs_partial); goto again; + default: + DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); + DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, + q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; } - - DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, - q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); - return 0; } -/* returns number of examined buffers and their common state in *state */ +/* + * Returns number of examined buffers and their common state in *state. + * Requested number of buffers-to-examine must be > 0. 
+ */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, int auto_ack, int merge_pending) @@ -225,17 +210,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack); - for (i = 0; i < count; i++) { - if (!__state) { - __state = q->slsb.val[bufnr]; - if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) - __state = SLSB_P_OUTPUT_EMPTY; - } else if (merge_pending) { - if ((q->slsb.val[bufnr] & __state) != __state) - break; - } else if (q->slsb.val[bufnr] != __state) - break; + /* get initial state: */ + __state = q->slsb.val[bufnr]; + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) + __state = SLSB_P_OUTPUT_EMPTY; + + for (i = 1; i < count; i++) { bufnr = next_buf(bufnr); + + /* merge PENDING into EMPTY: */ + if (merge_pending && + q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING && + __state == SLSB_P_OUTPUT_EMPTY) + continue; + + /* stop if next state differs from initial state: */ + if (q->slsb.val[bufnr] != __state) + break; } *state = __state; return i; @@ -502,8 +493,8 @@ static inline void inbound_primed(struct qdio_q *q, int count) static int get_inbound_buffer_frontier(struct qdio_q *q) { - int count, stop; unsigned char state = 0; + int count; q->timestamp = get_tod_clock_fast(); @@ -512,9 +503,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) * would return 0. */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); - stop = add_buf(q->first_to_check, count); - - if (q->first_to_check == stop) + if (!count) goto out; /* @@ -734,8 +723,8 @@ void qdio_inbound_processing(unsigned long data) static int get_outbound_buffer_frontier(struct qdio_q *q) { - int count, stop; unsigned char state = 0; + int count; q->timestamp = get_tod_clock_fast(); @@ -751,11 +740,11 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) * would return 0. 
*/ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); - stop = add_buf(q->first_to_check, count); - if (q->first_to_check == stop) + if (!count) goto out; - count = get_buf_states(q, q->first_to_check, &state, count, 0, 1); + count = get_buf_states(q, q->first_to_check, &state, count, 0, + q->u.out.use_cq); if (!count) goto out; @@ -1218,8 +1207,10 @@ no_cleanup: qdio_shutdown_thinint(irq_ptr); /* restore interrupt handler */ - if ((void *)cdev->handler == (void *)qdio_int_handler) + if ((void *)cdev->handler == (void *)qdio_int_handler) { cdev->handler = irq_ptr->orig_handler; + cdev->private->intparm = 0; + } spin_unlock_irq(get_ccwdev_lock(cdev)); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 98f3cfdc0d02..439991d71b14 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -507,8 +507,10 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->aqueue = *ciw; /* set new interrupt handler */ + spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev)); irq_ptr->orig_handler = init_data->cdev->handler; init_data->cdev->handler = qdio_int_handler; + spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); return 0; out_err: qdio_release_memory(irq_ptr); diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index c30420c517b1..ff6963ad6e39 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -124,6 +124,11 @@ static void fsm_io_request(struct vfio_ccw_private *private, if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { orb = (union orb *)io_region->orb_area; + /* Don't try to build a cp if transport mode is specified. */ + if (orb->tm.b) { + io_region->ret_code = -EOPNOTSUPP; + goto err_out; + } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); if (io_region->ret_code) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48d55dc9e986..35a0c2b52f82 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -25,7 +25,6 @@ #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/suspend.h> -#include <asm/reset.h> #include <asm/airq.h> #include <linux/atomic.h> #include <asm/isc.h> @@ -1197,26 +1196,7 @@ static void ap_config_timeout(struct timer_list *unused) queue_work(system_long_wq, &ap_scan_work); } -static void ap_reset_all(void) -{ - int i, j; - - for (i = 0; i < AP_DOMAINS; i++) { - if (!ap_test_config_domain(i)) - continue; - for (j = 0; j < AP_DEVICES; j++) { - if (!ap_test_config_card_id(j)) - continue; - ap_rapq(AP_MKQID(j, i)); - } - } -} - -static struct reset_call ap_reset_call = { - .fn = ap_reset_all, -}; - -int __init ap_debug_init(void) +static int __init ap_debug_init(void) { ap_dbf_info = debug_register("ap", 1, 1, DBF_MAX_SPRINTF_ARGS * sizeof(long)); @@ -1226,17 +1206,12 @@ int __init ap_debug_init(void) return 0; } -void ap_debug_exit(void) -{ - debug_unregister(ap_dbf_info); -} - /** * ap_module_init(): The module initialization code. * * Initializes the module. */ -int __init ap_module_init(void) +static int __init ap_module_init(void) { int max_domain_id; int rc, i; @@ -1274,8 +1249,6 @@ int __init ap_module_init(void) ap_airq_flag = (rc == 0); } - register_reset_call(&ap_reset_call); - /* Create /sys/bus/ap. 
*/ rc = bus_register(&ap_bus_type); if (rc) @@ -1331,7 +1304,6 @@ out_bus: bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); bus_unregister(&ap_bus_type); out: - unregister_reset_call(&ap_reset_call); if (ap_using_interrupts()) unregister_adapter_interrupt(&ap_airq); kfree(ap_configuration); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index e0827eaa42f1..02184cf35834 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -17,7 +17,7 @@ #include <linux/types.h> #include <asm/ap.h> -#define AP_DEVICES 64 /* Number of AP devices. */ +#define AP_DEVICES 256 /* Number of AP devices. */ #define AP_DOMAINS 256 /* Number of AP domains. */ #define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ @@ -240,7 +240,4 @@ void ap_queue_resume(struct ap_device *ap_dev); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); -int ap_module_init(void); -void ap_module_exit(void); - #endif /* _AP_BUS_H_ */ diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h index 6a9d77c75ec3..dc675eb5aef6 100644 --- a/drivers/s390/crypto/ap_debug.h +++ b/drivers/s390/crypto/ap_debug.h @@ -23,7 +23,4 @@ extern debug_info_t *ap_dbf_info; -int ap_debug_init(void); -void ap_debug_exit(void); - #endif /* AP_DEBUG_H */ diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index e7c2e4f9529a..ed80d00cdb6f 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -889,7 +889,7 @@ int pkey_findcard(const struct pkey_seckey *seckey, u16 *pcardnr, u16 *pdomain, int verify) { struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; - struct zcrypt_device_matrix *device_matrix; + struct zcrypt_device_status_ext *device_status; u16 card, dom; u64 mkvp[2]; int i, rc, oi = -1; @@ -899,18 +899,19 @@ int pkey_findcard(const struct pkey_seckey *seckey, return -EINVAL; /* fetch status of all crypto cards */ - device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix), + device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT + * sizeof(struct zcrypt_device_status_ext), GFP_KERNEL); - if (!device_matrix) + if (!device_status) return -ENOMEM; - zcrypt_device_status_mask(device_matrix); + zcrypt_device_status_mask_ext(device_status); /* walk through all crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES; i++) { - card = AP_QID_CARD(device_matrix->device[i].qid); - dom = AP_QID_QUEUE(device_matrix->device[i].qid); - if (device_matrix->device[i].online && - device_matrix->device[i].functions & 0x04) { + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + if (device_status[i].online && + device_status[i].functions & 0x04) { /* an enabled CCA Coprocessor card */ /* try cached mkvp */ if (mkvp_cache_fetch(card, dom, mkvp) == 0 && @@ -930,14 +931,14 @@ int pkey_findcard(const struct pkey_seckey *seckey, mkvp_cache_scrub(card, dom); } } - if (i >= MAX_ZDEV_ENTRIES) { + if (i >= MAX_ZDEV_ENTRIES_EXT) { /* nothing found, so this time without cache */ - for (i = 0; i < MAX_ZDEV_ENTRIES; i++) { - if (!(device_matrix->device[i].online && - device_matrix->device[i].functions & 0x04)) + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + if (!(device_status[i].online && + device_status[i].functions & 0x04)) continue; - card = AP_QID_CARD(device_matrix->device[i].qid); - dom = 
AP_QID_QUEUE(device_matrix->device[i].qid); + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); /* fresh fetch mkvp from adapter */ if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); @@ -947,13 +948,13 @@ int pkey_findcard(const struct pkey_seckey *seckey, oi = i; } } - if (i >= MAX_ZDEV_ENTRIES && oi >= 0) { + if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { /* old mkvp matched, use this card then */ - card = AP_QID_CARD(device_matrix->device[oi].qid); - dom = AP_QID_QUEUE(device_matrix->device[oi].qid); + card = AP_QID_CARD(device_status[oi].qid); + dom = AP_QID_QUEUE(device_status[oi].qid); } } - if (i < MAX_ZDEV_ENTRIES || oi >= 0) { + if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { if (pcardnr) *pcardnr = card; if (pdomain) @@ -962,7 +963,7 @@ int pkey_findcard(const struct pkey_seckey *seckey, } else rc = -ENODEV; - kfree(device_matrix); + kfree(device_status); return rc; } EXPORT_SYMBOL(pkey_findcard); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index ce15f101ee28..5efd84862ccb 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -18,8 +18,6 @@ #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/fs.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <linux/compat.h> #include <linux/slab.h> #include <linux/atomic.h> @@ -607,19 +605,24 @@ out: return rc; } -void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) +static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) { struct zcrypt_card *zc; struct zcrypt_queue *zq; struct zcrypt_device_status *stat; + int card, queue; + + memset(devstatus, 0, MAX_ZDEV_ENTRIES + * sizeof(struct zcrypt_device_status)); - memset(matrix, 0, sizeof(*matrix)); spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { - stat = matrix->device; - stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; - stat += AP_QID_QUEUE(zq->queue->qid); + card = AP_QID_CARD(zq->queue->qid); + if (card >= MAX_ZDEV_CARDIDS) + continue; + queue = AP_QID_QUEUE(zq->queue->qid); + stat = &devstatus[card * AP_DOMAINS + queue]; stat->hwtype = zc->card->ap_dev.device_type; stat->functions = zc->card->functions >> 26; stat->qid = zq->queue->qid; @@ -628,40 +631,70 @@ void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) } spin_unlock(&zcrypt_list_lock); } -EXPORT_SYMBOL(zcrypt_device_status_mask); -static void zcrypt_status_mask(char status[AP_DEVICES]) +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) { struct zcrypt_card *zc; struct zcrypt_queue *zq; + struct zcrypt_device_status_ext *stat; + int card, queue; + + memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT + * sizeof(struct zcrypt_device_status_ext)); - memset(status, 0, sizeof(char) * AP_DEVICES); spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + card = AP_QID_CARD(zq->queue->qid); + queue = AP_QID_QUEUE(zq->queue->qid); + stat = &devstatus[card * AP_DOMAINS + queue]; + stat->hwtype = zc->card->ap_dev.device_type; + stat->functions = zc->card->functions >> 26; + stat->qid = zq->queue->qid; + stat->online = zq->online ? 
0x01 : 0x00; + } + } + spin_unlock(&zcrypt_list_lock); +} +EXPORT_SYMBOL(zcrypt_device_status_mask_ext); + +static void zcrypt_status_mask(char status[], size_t max_adapters) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int card; + + memset(status, 0, max_adapters); + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + card = AP_QID_CARD(zq->queue->qid); + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index + || card >= max_adapters) continue; - status[AP_QID_CARD(zq->queue->qid)] = - zc->online ? zc->user_space_type : 0x0d; + status[card] = zc->online ? zc->user_space_type : 0x0d; } } spin_unlock(&zcrypt_list_lock); } -static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) +static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) { struct zcrypt_card *zc; struct zcrypt_queue *zq; + int card; - memset(qdepth, 0, sizeof(char) * AP_DEVICES); + memset(qdepth, 0, max_adapters); spin_lock(&zcrypt_list_lock); local_bh_disable(); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + card = AP_QID_CARD(zq->queue->qid); + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index + || card >= max_adapters) continue; spin_lock(&zq->queue->lock); - qdepth[AP_QID_CARD(zq->queue->qid)] = + qdepth[card] = zq->queue->pendingq_count + zq->queue->requestq_count; spin_unlock(&zq->queue->lock); @@ -671,21 +704,23 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) spin_unlock(&zcrypt_list_lock); } -static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) +static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) { struct zcrypt_card *zc; struct zcrypt_queue *zq; + int card; - memset(reqcnt, 0, sizeof(int) * AP_DEVICES); + memset(reqcnt, 0, sizeof(int) * max_adapters); spin_lock(&zcrypt_list_lock); local_bh_disable(); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + card = AP_QID_CARD(zq->queue->qid); + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index + || card >= max_adapters) continue; spin_lock(&zq->queue->lock); - reqcnt[AP_QID_CARD(zq->queue->qid)] = - zq->queue->total_request_count; + reqcnt[card] = zq->queue->total_request_count; spin_unlock(&zq->queue->lock); } } @@ -739,60 +774,10 @@ static int zcrypt_requestq_count(void) return requestq_count; } -static int zcrypt_count_type(int type) -{ - struct zcrypt_card *zc; - struct zcrypt_queue *zq; - int device_count; - - device_count = 0; - spin_lock(&zcrypt_list_lock); - for_each_zcrypt_card(zc) { - if (zc->card->id != type) - continue; - for_each_zcrypt_queue(zq, zc) { - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) - continue; - device_count++; - } - } - spin_unlock(&zcrypt_list_lock); - return device_count; -} - -/** - * zcrypt_ica_status(): Old, depracted combi status call. - * - * Old, deprecated combi status call. 
- */ -static long zcrypt_ica_status(struct file *filp, unsigned long arg) -{ - struct ica_z90_status *pstat; - int ret; - - pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); - if (!pstat) - return -ENOMEM; - pstat->totalcount = zcrypt_device_count; - pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); - pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); - pstat->requestqWaitCount = zcrypt_requestq_count(); - pstat->pendingqWaitCount = zcrypt_pendingq_count(); - pstat->totalOpenCount = atomic_read(&zcrypt_open_count); - pstat->cryptoDomain = ap_domain_index; - zcrypt_status_mask(pstat->status); - zcrypt_qdepth_mask(pstat->qdepth); - ret = 0; - if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) - ret = -EFAULT; - kfree(pstat); - return ret; -} - static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - int rc; + int rc = 0; switch (cmd) { case ICARSAMODEXPO: { @@ -871,48 +856,48 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; return rc; } - case ZDEVICESTATUS: { - struct zcrypt_device_matrix *device_status; + case ZCRYPT_DEVICE_STATUS: { + struct zcrypt_device_status_ext *device_status; + size_t total_size = MAX_ZDEV_ENTRIES_EXT + * sizeof(struct zcrypt_device_status_ext); - device_status = kzalloc(sizeof(struct zcrypt_device_matrix), - GFP_KERNEL); + device_status = kzalloc(total_size, GFP_KERNEL); if (!device_status) return -ENOMEM; - - zcrypt_device_status_mask(device_status); - + zcrypt_device_status_mask_ext(device_status); if (copy_to_user((char __user *) arg, device_status, - sizeof(struct zcrypt_device_matrix))) { - kfree(device_status); - return -EFAULT; - } - + total_size)) + rc = -EFAULT; kfree(device_status); - return 0; + return rc; } - case Z90STAT_STATUS_MASK: { + case ZCRYPT_STATUS_MASK: { char status[AP_DEVICES]; - zcrypt_status_mask(status); - if (copy_to_user((char __user *) arg, status, - sizeof(char) * AP_DEVICES)) + + zcrypt_status_mask(status, AP_DEVICES); + if (copy_to_user((char __user *) arg, status, sizeof(status))) return -EFAULT; return 0; } - case Z90STAT_QDEPTH_MASK: { + case ZCRYPT_QDEPTH_MASK: { char qdepth[AP_DEVICES]; - zcrypt_qdepth_mask(qdepth); - if (copy_to_user((char __user *) arg, qdepth, - sizeof(char) * AP_DEVICES)) + + zcrypt_qdepth_mask(qdepth, AP_DEVICES); + if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) return -EFAULT; return 0; } - case Z90STAT_PERDEV_REQCNT: { - int reqcnt[AP_DEVICES]; - zcrypt_perdev_reqcnt(reqcnt); - if (copy_to_user((int __user *) arg, reqcnt, - sizeof(int) * AP_DEVICES)) - return -EFAULT; - return 0; + case ZCRYPT_PERDEV_REQCNT: { + int *reqcnt; + + reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL); + if (!reqcnt) + return -ENOMEM; + zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); + if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + rc = -EFAULT; + kfree(reqcnt); + return rc; } case Z90STAT_REQUESTQ_COUNT: return put_user(zcrypt_requestq_count(), (int __user *) arg); @@ -924,38 +909,54 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, case Z90STAT_DOMAIN_INDEX: return put_user(ap_domain_index, (int __user *) arg); /* - * Deprecated ioctls. Don't add another device count ioctl, - * you can count them yourself in the user space with the - * output of the Z90STAT_STATUS_MASK ioctl. 
+ * Deprecated ioctls */ - case ICAZ90STATUS: - return zcrypt_ica_status(filp, arg); - case Z90STAT_TOTALCOUNT: - return put_user(zcrypt_device_count, (int __user *) arg); - case Z90STAT_PCICACOUNT: - return put_user(zcrypt_count_type(ZCRYPT_PCICA), - (int __user *) arg); - case Z90STAT_PCICCCOUNT: - return put_user(zcrypt_count_type(ZCRYPT_PCICC), - (int __user *) arg); - case Z90STAT_PCIXCCMCL2COUNT: - return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), - (int __user *) arg); - case Z90STAT_PCIXCCMCL3COUNT: - return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), - (int __user *) arg); - case Z90STAT_PCIXCCCOUNT: - return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + - zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), - (int __user *) arg); - case Z90STAT_CEX2CCOUNT: - return put_user(zcrypt_count_type(ZCRYPT_CEX2C), - (int __user *) arg); - case Z90STAT_CEX2ACOUNT: - return put_user(zcrypt_count_type(ZCRYPT_CEX2A), - (int __user *) arg); + case ZDEVICESTATUS: { + /* the old ioctl supports only 64 adapters */ + struct zcrypt_device_status *device_status; + size_t total_size = MAX_ZDEV_ENTRIES + * sizeof(struct zcrypt_device_status); + + device_status = kzalloc(total_size, GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask(device_status); + if (copy_to_user((char __user *) arg, device_status, + total_size)) + rc = -EFAULT; + kfree(device_status); + return rc; + } + case Z90STAT_STATUS_MASK: { + /* the old ioctl supports only 64 adapters */ + char status[MAX_ZDEV_CARDIDS]; + + zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); + if (copy_to_user((char __user *) arg, status, sizeof(status))) + return -EFAULT; + return 0; + } + case Z90STAT_QDEPTH_MASK: { + /* the old ioctl supports only 64 adapters */ + char qdepth[MAX_ZDEV_CARDIDS]; + + zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); + if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) + return -EFAULT; + return 0; + } + case Z90STAT_PERDEV_REQCNT: { + /* the old ioctl supports only 64 adapters */ + int reqcnt[MAX_ZDEV_CARDIDS]; + + zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); + if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + return -EFAULT; + return 0; + } + /* unknown ioctl number */ default: - /* unknown ioctl number */ + ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd); return -ENOIOCTLCMD; } } @@ -1152,201 +1153,6 @@ static struct miscdevice zcrypt_misc_device = { .fops = &zcrypt_fops, }; -/* - * Deprecated /proc entry support. 
- */ -static struct proc_dir_entry *zcrypt_entry; - -static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) -{ - int i; - - for (i = 0; i < len; i++) - seq_printf(m, "%01x", (unsigned int) addr[i]); - seq_putc(m, ' '); -} - -static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) -{ - int inl, c, cx; - - seq_printf(m, " "); - inl = 0; - for (c = 0; c < (len / 16); c++) { - sprintcl(m, addr+inl, 16); - inl += 16; - } - cx = len%16; - if (cx) { - sprintcl(m, addr+inl, cx); - inl += cx; - } - seq_putc(m, '\n'); -} - -static void sprinthx(unsigned char *title, struct seq_file *m, - unsigned char *addr, unsigned int len) -{ - int inl, r, rx; - - seq_printf(m, "\n%s\n", title); - inl = 0; - for (r = 0; r < (len / 64); r++) { - sprintrw(m, addr+inl, 64); - inl += 64; - } - rx = len % 64; - if (rx) { - sprintrw(m, addr+inl, rx); - inl += rx; - } - seq_putc(m, '\n'); -} - -static void sprinthx4(unsigned char *title, struct seq_file *m, - unsigned int *array, unsigned int len) -{ - seq_printf(m, "\n%s\n", title); - seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false); - seq_putc(m, '\n'); -} - -static int zcrypt_proc_show(struct seq_file *m, void *v) -{ - char workarea[sizeof(int) * AP_DEVICES]; - - seq_printf(m, "\nzcrypt version: %d.%d.%d\n", - ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); - seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); - seq_printf(m, "Total device count: %d\n", zcrypt_device_count); - seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); - seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); - seq_printf(m, "PCIXCC MCL2 count: %d\n", - zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); - seq_printf(m, "PCIXCC MCL3 count: %d\n", - zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); - seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); - seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); - seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); - seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); - seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); - seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); - seq_printf(m, "Total open handles: %d\n\n", - atomic_read(&zcrypt_open_count)); - zcrypt_status_mask(workarea); - sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " - "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", - m, workarea, AP_DEVICES); - zcrypt_qdepth_mask(workarea); - sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); - zcrypt_perdev_reqcnt((int *) workarea); - sprinthx4("Per-device successfully completed request counts", - m, (unsigned int *) workarea, AP_DEVICES); - return 0; -} - -static int zcrypt_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, zcrypt_proc_show, NULL); -} - -static void zcrypt_disable_card(int index) -{ - struct zcrypt_card *zc; - struct zcrypt_queue *zq; - - spin_lock(&zcrypt_list_lock); - for_each_zcrypt_card(zc) { - for_each_zcrypt_queue(zq, zc) { - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) - continue; - zq->online = 0; - ap_flush_queue(zq->queue); - } - } - spin_unlock(&zcrypt_list_lock); -} - -static void zcrypt_enable_card(int index) -{ - struct zcrypt_card *zc; - struct zcrypt_queue *zq; - - spin_lock(&zcrypt_list_lock); - for_each_zcrypt_card(zc) { - for_each_zcrypt_queue(zq, zc) { - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) - continue; - zq->online = 1; - ap_flush_queue(zq->queue); - } - } - 
spin_unlock(&zcrypt_list_lock); -} - -static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, - size_t count, loff_t *pos) -{ - unsigned char *lbuf, *ptr; - size_t local_count; - int j; - - if (count <= 0) - return 0; - -#define LBUFSIZE 1200UL - lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); - if (!lbuf) - return 0; - - local_count = min(LBUFSIZE - 1, count); - if (copy_from_user(lbuf, buffer, local_count) != 0) { - kfree(lbuf); - return -EFAULT; - } - lbuf[local_count] = '\0'; - - ptr = strstr(lbuf, "Online devices"); - if (!ptr) - goto out; - ptr = strstr(ptr, "\n"); - if (!ptr) - goto out; - ptr++; - - if (strstr(ptr, "Waiting work element counts") == NULL) - goto out; - - for (j = 0; j < 64 && *ptr; ptr++) { - /* - * '0' for no device, '1' for PCICA, '2' for PCICC, - * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, - * '5' for CEX2C and '6' for CEX2A' - * '7' for CEX3C and '8' for CEX3A - */ - if (*ptr >= '0' && *ptr <= '8') - j++; - else if (*ptr == 'd' || *ptr == 'D') - zcrypt_disable_card(j++); - else if (*ptr == 'e' || *ptr == 'E') - zcrypt_enable_card(j++); - else if (*ptr != ' ' && *ptr != '\t') - break; - } -out: - kfree(lbuf); - return count; -} - -static const struct file_operations zcrypt_proc_fops = { - .owner = THIS_MODULE, - .open = zcrypt_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = zcrypt_proc_write, -}; - static int zcrypt_rng_device_count; static u32 *zcrypt_rng_buffer; static int zcrypt_rng_buffer_index; @@ -1448,27 +1254,15 @@ int __init zcrypt_api_init(void) if (rc) goto out; - atomic_set(&zcrypt_rescan_req, 0); - /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); if (rc < 0) goto out; - /* Set up the proc file system */ - zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, - &zcrypt_proc_fops); - if (!zcrypt_entry) { - rc = -ENOMEM; - goto out_misc; - } - zcrypt_msgtype6_init(); zcrypt_msgtype50_init(); return 0; -out_misc: - misc_deregister(&zcrypt_misc_device); out: return rc; } @@ -1480,7 +1274,6 @@ out: */ void __exit zcrypt_api_exit(void) { - remove_proc_entry("driver/z90crypt", NULL); misc_deregister(&zcrypt_misc_device); zcrypt_msgtype6_exit(); zcrypt_msgtype50_exit(); diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 9fff8912f6e3..f149a8fee60d 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -21,30 +21,6 @@ #include <asm/zcrypt.h> #include "ap_bus.h" -/* deprecated status calls */ -#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status) -#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int) - -/** - * This structure is deprecated and the corresponding ioctl() has been - * replaced with individual ioctl()s for each piece of data! 
- */ -struct ica_z90_status { - int totalcount; - int leedslitecount; // PCICA - int leeds2count; // PCICC - // int PCIXCCCount; is not in struct for backward compatibility - int requestqWaitCount; - int pendingqWaitCount; - int totalOpenCount; - int cryptoDomain; - // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3, - // 5=CEX2C - unsigned char status[64]; - // qdepth: # work elements waiting for each device - unsigned char qdepth[64]; -}; - /** * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2, * PCIXCC_MCL3, CEX2C, or CEX2A @@ -179,6 +155,6 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); long zcrypt_send_cprb(struct ica_xcRB *xcRB); -void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus); +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); #endif /* _ZCRYPT_API_H_ */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 19203340f879..04fefa5bb08d 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1369,7 +1369,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card) static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; - struct channel_path_desc *chp_dsc; + struct channel_path_desc_fmt0 *chp_dsc; QETH_DBF_TEXT(SETUP, 2, "chp_desc"); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 50a313806dde..2ad6f12f3d49 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -21,7 +21,6 @@ #include <linux/list.h> #include <linux/hash.h> #include <linux/hashtable.h> -#include <linux/string.h> #include <asm/setup.h> #include "qeth_core.h" #include "qeth_l2.h" diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3b0c8b8a7634..066b5c3aaae6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -176,7 +176,7 @@ static struct device_driver smsg_driver = { static void __exit smsg_exit(void) { - cpcmd("SET SMSG IUCV", NULL, 0, NULL); + cpcmd("SET SMSG OFF", NULL, 0, NULL); device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index ca218c82321f..6162cf57a20a 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -961,7 +961,7 @@ static int zfcp_fc_exec_els_job(struct bsg_job *job, d_id = ntoh24(bsg_request->rqst_data.h_els.port_id); els->handler = zfcp_fc_ct_els_job_handler; - return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ); + return zfcp_fsf_send_els(adapter, d_id, els, job->timeout / HZ); } static int zfcp_fc_exec_ct_job(struct bsg_job *job, @@ -980,7 +980,7 @@ static int zfcp_fc_exec_ct_job(struct bsg_job *job, return ret; ct->handler = zfcp_fc_ct_job_handler; - ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ); + ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->timeout / HZ); if (ret) zfcp_fc_wka_port_put(wka_port); |
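For the qdio_main.c hunks above, the key behavioural change is that qdio_check_ccq() is removed and qdio_do_eqbs()/qdio_do_sqbs() now switch directly on the QEBSM condition-code qualifier (ccq): 0 and 32 mean all buffers are done or the next buffer state differs, 96 means not all buffers were processed, 97 means no buffer was processed yet and the instruction is retried, and any other value is treated as an error reported through the queue handler. The standalone sketch below only restates that classification for the EQBS path; the enum and function name are illustrative and are not taken from the kernel source.

/*
 * Illustrative sketch, not kernel code: classify an EQBS condition-code
 * qualifier the way the reworked qdio_do_eqbs() switch statement does.
 */
enum ccq_outcome {
	CCQ_DONE,	/* ccq 0 or 32: all done, or next buffer state differs */
	CCQ_PARTIAL,	/* ccq 96: not all buffers were processed */
	CCQ_RETRY,	/* ccq 97: no buffer processed yet, issue EQBS again */
	CCQ_ERROR,	/* anything else: report via the queue handler */
};

static enum ccq_outcome classify_eqbs_ccq(unsigned int ccq)
{
	switch (ccq) {
	case 0:
	case 32:
		return CCQ_DONE;
	case 96:
		return CCQ_PARTIAL;
	case 97:
		return CCQ_RETRY;
	default:
		return CCQ_ERROR;
	}
}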