-rw-r--r-- | arch/s390/include/asm/cpu_mf.h      |  10
-rw-r--r-- | arch/s390/include/asm/perf_event.h  |   2
-rw-r--r-- | arch/s390/include/uapi/asm/zcrypt.h |   4
-rw-r--r-- | arch/s390/kernel/perf_cpum_sf.c     | 165
-rw-r--r-- | arch/s390/kernel/topology.c         |   3
-rw-r--r-- | drivers/s390/cio/ccwgroup.c         |   2
-rw-r--r-- | drivers/s390/cio/css.c              |   2
-rw-r--r-- | drivers/s390/cio/device_ops.c       |  23
-rw-r--r-- | drivers/s390/crypto/ap_bus.c        |  12
-rw-r--r-- | drivers/s390/crypto/ap_bus.h        |   3
-rw-r--r-- | drivers/s390/crypto/pkey_api.c      | 113
-rw-r--r-- | drivers/s390/crypto/vfio_ap_drv.c   |   2
-rw-r--r-- | drivers/s390/crypto/zcrypt_api.h    |   3
-rw-r--r-- | drivers/s390/crypto/zcrypt_cex4.c   |  72
14 files changed, 334 insertions, 82 deletions
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index ae3e3221d4b5..ceeb552d3472 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -70,7 +70,7 @@ struct hws_qsi_info_block {	    /* Bit(s) */
 	unsigned long tear;	    /* 24-31: TEAR contents */
 	unsigned long dear;	    /* 32-39: DEAR contents */
 	unsigned int rsvrd0;	    /* 40-43: reserved */
-	unsigned int cpu_speed;     /* 44-47: CPU speed */
+	unsigned int cpu_speed;	    /* 44-47: CPU speed */
 	unsigned long long rsvrd1;  /* 48-55: reserved */
 	unsigned long long rsvrd2;  /* 56-63: reserved */
 } __packed;
@@ -89,10 +89,10 @@
 	unsigned long tear;	    /* 16-23: TEAR contents */
 	unsigned long dear;	    /* 24-31: DEAR contents */
 	/* 32-63: */
-	unsigned long rsvrd1;       /* reserved */
-	unsigned long rsvrd2;       /* reserved */
-	unsigned long rsvrd3;       /* reserved */
-	unsigned long rsvrd4;       /* reserved */
+	unsigned long rsvrd1;	    /* reserved */
+	unsigned long rsvrd2;	    /* reserved */
+	unsigned long rsvrd3;	    /* reserved */
+	unsigned long rsvrd4;	    /* reserved */
 } __packed;
 
 struct hws_basic_entry {
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 560d8f766ddf..4652ffffe0b2 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -60,6 +60,7 @@ struct perf_sf_sde_regs {
 #define PERF_CPUM_SF_MODE_MASK		(PERF_CPUM_SF_BASIC_MODE| \
 					 PERF_CPUM_SF_DIAG_MODE)
 #define PERF_CPUM_SF_FULL_BLOCKS	0x0004	  /* Process full SDBs only */
+#define PERF_CPUM_SF_FREQ_MODE		0x0008	  /* Sampling with frequency */
 
 #define REG_NONE		0
 #define REG_OVERFLOW		1
@@ -70,5 +71,6 @@ struct perf_sf_sde_regs {
 #define SAMPL_FLAGS(hwc)	((hwc)->config_base)
 #define SAMPL_DIAG_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
 #define SDB_FULL_BLOCKS(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
+#define SAMPLE_FREQ_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
 
 #endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 8c5755f41dde..f9e5e1f0821d 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -4,7 +4,7 @@
  *
  *  zcrypt 2.2.1 (user-visible header)
  *
- *  Copyright IBM Corp. 2001, 2018
+ *  Copyright IBM Corp. 2001, 2019
  *  Author(s): Robert Burroughs
  *	       Eric Rossman (edrossma@us.ibm.com)
  *
@@ -286,7 +286,7 @@ struct zcrypt_device_matrix_ext {
  *   0x08: CEX3A
  *   0x0a: CEX4
  *   0x0b: CEX5
- *   0x0c: CEX6
+ *   0x0c: CEX6 and CEX7
  *   0x0d: device is disabled
  *
  * ZCRYPT_QDEPTH_MASK
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 292a452cd1f3..544a02e944c6 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -673,13 +673,89 @@ out:
 	rcu_read_unlock();
 }
 
+static unsigned long getrate(bool freq, unsigned long sample,
+			     struct hws_qsi_info_block *si)
+{
+	unsigned long rate;
+
+	if (freq) {
+		rate = freq_to_sample_rate(si, sample);
+		rate = hw_limit_rate(si, rate);
+	} else {
+		/* The min/max sampling rates specifies the valid range
+		 * of sample periods. If the specified sample period is
+		 * out of range, limit the period to the range boundary.
+		 */
+		rate = hw_limit_rate(si, sample);
+
+		/* The perf core maintains a maximum sample rate that is
+		 * configurable through the sysctl interface. Ensure the
+		 * sampling rate does not exceed this value. This also helps
+		 * to avoid throttling when pushing samples with
+		 * perf_event_overflow().
+		 */
+		if (sample_rate_to_freq(si, rate) >
+		    sysctl_perf_event_sample_rate) {
+			debug_sprintf_event(sfdbg, 1,
+					    "Sampling rate exceeds maximum "
+					    "perf sample rate\n");
+			rate = 0;
+		}
+	}
+	return rate;
+}
+
+/* The sampling information (si) contains information about the
+ * min/max sampling intervals and the CPU speed. So calculate the
+ * correct sampling interval and avoid the whole period adjust
+ * feedback loop.
+ *
+ * Since the CPU Measurement sampling facility can not handle frequency
+ * calculate the sampling interval when frequency is specified using
+ * this formula:
+ *	interval := cpu_speed * 1000000 / sample_freq
+ *
+ * Returns errno on bad input and zero on success with parameter interval
+ * set to the correct sampling rate.
+ *
+ * Note: This function turns off freq bit to avoid calling function
+ * perf_adjust_period(). This causes frequency adjustment in the common
+ * code part which causes tremendous variations in the counter values.
+ */
+static int __hw_perf_event_init_rate(struct perf_event *event,
+				     struct hws_qsi_info_block *si)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned long rate;
+
+	if (attr->freq) {
+		if (!attr->sample_freq)
+			return -EINVAL;
+		rate = getrate(attr->freq, attr->sample_freq, si);
+		attr->freq = 0;		/* Don't call perf_adjust_period() */
+		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
+	} else {
+		rate = getrate(attr->freq, attr->sample_period, si);
+		if (!rate)
+			return -EINVAL;
+	}
+	attr->sample_period = rate;
+	SAMPL_RATE(hwc) = rate;
+	hw_init_period(hwc, SAMPL_RATE(hwc));
+	debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
+			    "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
+			    event->attr.sample_period, event->attr.freq,
+			    SAMPLE_FREQ_MODE(hwc));
+	return 0;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct cpu_hw_sf *cpuhw;
 	struct hws_qsi_info_block si;
 	struct perf_event_attr *attr = &event->attr;
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long rate;
 	int cpu, err;
 
 	/* Reserve CPU-measurement sampling facility */
@@ -745,43 +821,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
 		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
 
-	/* The sampling information (si) contains information about the
-	 * min/max sampling intervals and the CPU speed. So calculate the
-	 * correct sampling interval and avoid the whole period adjust
-	 * feedback loop.
-	 */
-	rate = 0;
-	if (attr->freq) {
-		if (!attr->sample_freq) {
-			err = -EINVAL;
-			goto out;
-		}
-		rate = freq_to_sample_rate(&si, attr->sample_freq);
-		rate = hw_limit_rate(&si, rate);
-		attr->freq = 0;
-		attr->sample_period = rate;
-	} else {
-		/* The min/max sampling rates specifies the valid range
-		 * of sample periods. If the specified sample period is
-		 * out of range, limit the period to the range boundary.
-		 */
-		rate = hw_limit_rate(&si, hwc->sample_period);
-
-		/* The perf core maintains a maximum sample rate that is
-		 * configurable through the sysctl interface. Ensure the
-		 * sampling rate does not exceed this value. This also helps
-		 * to avoid throttling when pushing samples with
-		 * perf_event_overflow().
-		 */
-		if (sample_rate_to_freq(&si, rate) >
-		    sysctl_perf_event_sample_rate) {
-			err = -EINVAL;
-			debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
-			goto out;
-		}
-	}
-	SAMPL_RATE(hwc) = rate;
-	hw_init_period(hwc, SAMPL_RATE(hwc));
+	err = __hw_perf_event_init_rate(event, &si);
+	if (err)
+		goto out;
 
 	/* Initialize sample data overflow accounting */
 	hwc->extra_reg.reg = REG_OVERFLOW;
@@ -904,6 +946,8 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 			if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
 				extend_sampling_buffer(&cpuhw->sfb, hwc);
 		}
+		/* Rate may be adjusted with ioctl() */
+		cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
 	}
 
 	/* (Re)enable the PMU and sampling facility */
@@ -922,8 +966,9 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	lpp(&S390_lowcore.lpp);
 
 	debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
-			    "tear=%p dear=%p\n", cpuhw->lsctl.es,
-			    cpuhw->lsctl.cs, cpuhw->lsctl.ed, cpuhw->lsctl.cd,
+			    "interval:%lx tear=%p dear=%p\n",
+			    cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
+			    cpuhw->lsctl.cd, cpuhw->lsctl.interval,
 			    (void *) cpuhw->lsctl.tear,
 			    (void *) cpuhw->lsctl.dear);
 }
@@ -1717,6 +1762,44 @@ static void cpumsf_pmu_read(struct perf_event *event)
 	/* Nothing to do ... updates are interrupt-driven */
 }
 
+/* Check if the new sampling period/freqeuncy is appropriate.
+ *
+ * Return non-zero on error and zero on passed checks.
+ */
+static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
+{
+	struct hws_qsi_info_block si;
+	unsigned long rate;
+	bool do_freq;
+
+	memset(&si, 0, sizeof(si));
+	if (event->cpu == -1) {
+		if (qsi(&si))
+			return -ENODEV;
+	} else {
+		/* Event is pinned to a particular CPU, retrieve the per-CPU
+		 * sampling structure for accessing the CPU-specific QSI.
+		 */
+		struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
+
+		si = cpuhw->qsi;
+	}
+
+	do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
+	rate = getrate(do_freq, value, &si);
+	if (!rate)
+		return -EINVAL;
+
+	event->attr.sample_period = rate;
+	SAMPL_RATE(&event->hw) = rate;
+	hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
+	debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
+			    "cpu:%d value:%llx period:%llx freq:%d\n",
+			    event->cpu, value,
+			    event->attr.sample_period, do_freq);
+	return 0;
+}
+
 /* Activate sampling control.
  * Next call of pmu_enable() starts sampling.
  */
@@ -1908,6 +1991,8 @@ static struct pmu cpumf_sampling = {
 
 	.setup_aux    = aux_buffer_setup,
 	.free_aux     = aux_buffer_free,
+
+	.check_period = cpumsf_pmu_check_period,
 };
 
 static void cpumf_measurement_alert(struct ext_code ext_code,
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2db6fb405a9a..3627953007ed 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
 	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
-		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+		if (dev)
+			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	return rc;
 }
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 0005ec9285aa..b42a93736668 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
 			goto error;
 	}
 	/* Check for trailing stuff. */
-	if (i == num_devices && strlen(buf) > 0) {
+	if (i == num_devices && buf && strlen(buf) > 0) {
 		rc = -EINVAL;
 		goto error;
 	}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 22c55816100b..1fbfb0a93f5f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1388,6 +1388,8 @@ device_initcall(cio_settle_init);
 
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
+	if (!sch->dev.parent)
+		return 0;
 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
 }
 
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d722458c5928..65841af15748 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -124,9 +124,7 @@ EXPORT_SYMBOL(ccw_device_is_multipath);
 /**
  * ccw_device_clear() - terminate I/O request processing
  * @cdev: target ccw device
- * @intparm: interruption parameter; value is only used if no I/O is
- *	     outstanding, otherwise the intparm associated with the I/O request
- *	     is returned
+ * @intparm: interruption parameter to be returned upon conclusion of csch
  *
  * ccw_device_clear() calls csch on @cdev's subchannel.
  * Returns:
@@ -179,6 +177,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
  * completed during the time specified by @expires. If a timeout occurs, the
  * channel program is terminated via xsch, hsch or csch, and the device's
  * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -256,6 +257,9 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -287,6 +291,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
@@ -322,6 +329,9 @@ int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
@@ -343,11 +353,12 @@ int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
 /**
  * ccw_device_halt() - halt I/O request processing
  * @cdev: target ccw device
- * @intparm: interruption parameter; value is only used if no I/O is
- *	     outstanding, otherwise the intparm associated with the I/O request
- *	     is returned
+ * @intparm: interruption parameter to be returned upon conclusion of hsch
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_clear().
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a76b8a8bcbbb..a1915061932e 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1322,24 +1322,24 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
 	/* < CEX2A is not supported */
 	if (rawtype < AP_DEVICE_TYPE_CEX2A)
 		return 0;
-	/* up to CEX6 known and fully supported */
-	if (rawtype <= AP_DEVICE_TYPE_CEX6)
+	/* up to CEX7 known and fully supported */
+	if (rawtype <= AP_DEVICE_TYPE_CEX7)
 		return rawtype;
 	/*
-	 * unknown new type > CEX6, check for compatibility
+	 * unknown new type > CEX7, check for compatibility
 	 * to the highest known and supported type which is
-	 * currently CEX6 with the help of the QACT function.
+	 * currently CEX7 with the help of the QACT function.
 	 */
 	if (ap_qact_available()) {
 		struct ap_queue_status status;
 		union ap_qact_ap_info apinfo = {0};
 
 		apinfo.mode = (func >> 26) & 0x07;
-		apinfo.cat = AP_DEVICE_TYPE_CEX6;
+		apinfo.cat = AP_DEVICE_TYPE_CEX7;
 		status = ap_qact(qid, 0, &apinfo);
 		if (status.response_code == AP_RESPONSE_NORMAL
 		    && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
-		    && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+		    && apinfo.cat <= AP_DEVICE_TYPE_CEX7)
 			comp_type = apinfo.cat;
 	}
 	if (!comp_type)
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6f3cf37776ca..433b7b64368d 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
 /*
- * Copyright IBM Corp. 2006, 2012
+ * Copyright IBM Corp. 2006, 2019
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -63,6 +63,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 #define AP_DEVICE_TYPE_CEX4	10
 #define AP_DEVICE_TYPE_CEX5	11
 #define AP_DEVICE_TYPE_CEX6	12
+#define AP_DEVICE_TYPE_CEX7	13
 
 /*
  * Known function facilities
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index f76a1d0f54c4..9de3d46b3253 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -1363,9 +1363,122 @@ static struct attribute_group ccadata_attr_group = {
 	.bin_attrs = ccadata_attrs,
 };
 
+#define CCACIPHERTOKENSIZE	(sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+					    bool is_xts, char *buf, loff_t off,
+					    size_t count)
+{
+	size_t keysize;
+	int rc;
+
+	if (off != 0 || count < CCACIPHERTOKENSIZE)
+		return -EINVAL;
+	if (is_xts)
+		if (count < 2 * CCACIPHERTOKENSIZE)
+			return -EINVAL;
+
+	keysize = CCACIPHERTOKENSIZE;
+	rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+	if (rc)
+		return rc;
+	memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
+
+	if (is_xts) {
+		keysize = CCACIPHERTOKENSIZE;
+		rc = cca_gencipherkey(-1, -1, keybits, 0,
+				      buf + CCACIPHERTOKENSIZE, &keysize);
+		if (rc)
+			return rc;
+		memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
+		       CCACIPHERTOKENSIZE - keysize);
+
+		return 2 * CCACIPHERTOKENSIZE;
+	}
+
+	return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+				      struct kobject *kobj,
+				      struct bin_attribute *attr,
+				      char *buf, loff_t off,
+				      size_t count)
+{
+	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+					    off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+				      struct kobject *kobj,
+				      struct bin_attribute *attr,
+				      char *buf, loff_t off,
+				      size_t count)
+{
+	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+					    off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+				      struct kobject *kobj,
+				      struct bin_attribute *attr,
+				      char *buf, loff_t off,
+				      size_t count)
+{
+	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+					    off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+					  struct kobject *kobj,
+					  struct bin_attribute *attr,
+					  char *buf, loff_t off,
+					  size_t count)
+{
+	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+					    off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+					  struct kobject *kobj,
+					  struct bin_attribute *attr,
+					  char *buf, loff_t off,
+					  size_t count)
+{
+	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+					    off, count);
+}
+
+static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static struct bin_attribute *ccacipher_attrs[] = {
+	&bin_attr_ccacipher_aes_128,
+	&bin_attr_ccacipher_aes_192,
+	&bin_attr_ccacipher_aes_256,
+	&bin_attr_ccacipher_aes_128_xts,
+	&bin_attr_ccacipher_aes_256_xts,
+	NULL
+};
+
+static struct attribute_group ccacipher_attr_group = {
+	.name	   = "ccacipher",
+	.bin_attrs = ccacipher_attrs,
+};
+
 static const struct attribute_group *pkey_attr_groups[] = {
 	&protkey_attr_group,
 	&ccadata_attr_group,
+	&ccacipher_attr_group,
 	NULL,
 };
 
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 003662aa8060..be2520cc010b 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -36,6 +36,8 @@ static struct ap_device_id ap_queue_ids[] = {
 	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
 	{ .dev_type = AP_DEVICE_TYPE_CEX6,
 	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX7,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
 	{ /* end of sibling */ },
 };
 
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 2d3f2732344f..d464618cd84f 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
 /*
- * Copyright IBM Corp. 2001, 2018
+ * Copyright IBM Corp. 2001, 2019
  * Author(s): Robert Burroughs
  *	      Eric Rossman (edrossma@us.ibm.com)
  *	      Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -29,6 +29,7 @@
 #define ZCRYPT_CEX4		10
 #define ZCRYPT_CEX5		11
 #define ZCRYPT_CEX6		12
+#define ZCRYPT_CEX7		13
 
 /**
  * Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f58d8dec19dc..442e3d6162f7 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *  Copyright IBM Corp. 2012
+ *  Copyright IBM Corp. 2012, 2019
  *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
 */
 
@@ -38,8 +38,8 @@
 #define CEX4_CLEANUP_TIME	(900*HZ)
 
 MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \
-		   "Copyright IBM Corp. 2018");
+MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \
+		   "Copyright IBM Corp. 2019");
 MODULE_LICENSE("GPL");
 
 static struct ap_device_id zcrypt_cex4_card_ids[] = {
@@ -49,6 +49,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
 	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
 	{ .dev_type = AP_DEVICE_TYPE_CEX6,
 	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX7,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
 	{ /* end of list */ },
 };
 
@@ -61,6 +63,8 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
 	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
 	{ .dev_type = AP_DEVICE_TYPE_CEX6,
 	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX7,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
 	{ /* end of list */ },
 };
 
@@ -146,7 +150,7 @@ static const struct attribute_group cca_queue_attr_group = {
 };
 
 /**
- * Probe function for CEX4/CEX5/CEX6 card device. It always
+ * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
 * accepts the AP device since the bus_match already checked
 * the hardware type.
 * @ap_dev: pointer to the AP device.
@@ -158,25 +162,31 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
 	 */
 	static const int CEX4A_SPEED_IDX[] = {
-		 14, 19, 249, 42, 228, 1458, 0, 0};
+		 14,  19, 249, 42, 228, 1458, 0, 0};
 	static const int CEX5A_SPEED_IDX[] = {
-		  8, 9, 20, 18, 66, 458, 0, 0};
+		  8,   9,  20, 18,  66,  458, 0, 0};
 	static const int CEX6A_SPEED_IDX[] = {
-		  6, 9, 20, 17, 65, 438, 0, 0};
+		  6,   9,  20, 17,  65,  438, 0, 0};
+	static const int CEX7A_SPEED_IDX[] = {
+		  6,   8,  17, 15,  54,  362, 0, 0};
 
 	static const int CEX4C_SPEED_IDX[] = {
 		 59,  69, 308, 83, 278, 2204, 209, 40};
 	static const int CEX5C_SPEED_IDX[] = {
-		 24, 31, 50, 37, 90, 479, 27, 10};
+		 24,  31,  50, 37,  90,  479,  27, 10};
 	static const int CEX6C_SPEED_IDX[] = {
-		 16, 20, 32, 27, 77, 455, 23, 9};
+		 16,  20,  32, 27,  77,  455,  24,  9};
+	static const int CEX7C_SPEED_IDX[] = {
+		 14,  16,  26, 23,  64,  376,  23,  8};
 
 	static const int CEX4P_SPEED_IDX[] = {
-		224, 313, 3560, 359, 605, 2827, 0, 50};
+		  0,   0,   0,   0,   0,   0,   0,  50};
 	static const int CEX5P_SPEED_IDX[] = {
-		 63, 84, 156, 83, 142, 533, 0, 10};
+		  0,   0,   0,   0,   0,   0,   0,  10};
 	static const int CEX6P_SPEED_IDX[] = {
-		 55, 70, 121, 73, 129, 522, 0, 9};
+		  0,   0,   0,   0,   0,   0,   0,   9};
+	static const int CEX7P_SPEED_IDX[] = {
+		  0,   0,   0,   0,   0,   0,   0,   8};
 
 	struct ap_card *ac = to_ap_card(&ap_dev->device);
 	struct zcrypt_card *zc;
@@ -198,11 +208,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 			zc->user_space_type = ZCRYPT_CEX5;
 			memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
 			       sizeof(CEX5A_SPEED_IDX));
-		} else {
+		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6A";
 			zc->user_space_type = ZCRYPT_CEX6;
 			memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
 			       sizeof(CEX6A_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX7A";
+			/* wrong user space type, just for compatibility
+			 * with the ZCRYPT_STATUS_MASK ioctl.
+			 */
+			zc->user_space_type = ZCRYPT_CEX6;
+			memcpy(zc->speed_rating, CEX7A_SPEED_IDX,
+			       sizeof(CEX7A_SPEED_IDX));
 		}
 		zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
 		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -232,7 +250,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 			zc->user_space_type = ZCRYPT_CEX3C;
 			memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
 			       sizeof(CEX5C_SPEED_IDX));
-		} else {
+		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6C";
 			/* wrong user space type, must be CEX6
 			 * just keep it for cca compatibility
@@ -240,6 +258,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 			zc->user_space_type = ZCRYPT_CEX3C;
 			memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
 			       sizeof(CEX6C_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX7C";
+			/* wrong user space type, must be CEX7
+			 * just keep it for cca compatibility
+			 */
+			zc->user_space_type = ZCRYPT_CEX3C;
+			memcpy(zc->speed_rating, CEX7C_SPEED_IDX,
+			       sizeof(CEX7C_SPEED_IDX));
 		}
 		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -255,11 +281,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 			zc->user_space_type = ZCRYPT_CEX5;
 			memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
 			       sizeof(CEX5P_SPEED_IDX));
-		} else {
+		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6P";
 			zc->user_space_type = ZCRYPT_CEX6;
 			memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
 			       sizeof(CEX6P_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX7P";
+			/* wrong user space type, just for compatibility
+			 * with the ZCRYPT_STATUS_MASK ioctl.
+			 */
+			zc->user_space_type = ZCRYPT_CEX6;
+			memcpy(zc->speed_rating, CEX7P_SPEED_IDX,
+			       sizeof(CEX7P_SPEED_IDX));
 		}
 		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -289,8 +323,8 @@ out:
 }
 
 /**
- * This is called to remove the CEX4/CEX5/CEX6 card driver information
- * if an AP card device is removed.
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
+ * information if an AP card device is removed.
 */
 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
 {
@@ -311,7 +345,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
 };
 
 /**
- * Probe function for CEX4/CEX5/CEX6 queue device. It always
+ * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
 * accepts the AP device since the bus_match already checked
 * the hardware type.
 * @ap_dev: pointer to the AP device.
@@ -369,7 +403,7 @@ out:
 }
 
 /**
- * This is called to remove the CEX4/CEX5/CEX6 queue driver
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
 * information if an AP queue device is removed.
 */
 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)