author     Linus Torvalds <torvalds@linux-foundation.org>  2022-08-07 03:05:21 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-08-07 03:05:21 +0300
commit     24cb958695724ffb4488ef4f65892c0767bcd2f2 (patch)
tree       87fdf68a4a2b37ba855c99415c890d8c6d3218ef /drivers
parent     d77771c926671e0362af3fe792391be66072b242 (diff)
parent     953503751a426413ea8aee2299ae3ee971b70d9b (diff)
download   linux-24cb958695724ffb4488ef4f65892c0767bcd2f2.tar.xz
Merge tag 's390-5.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Alexander Gordeev:
- Rework copy_oldmem_page() callback to take an iov_iter.
This includes a few prerequisite updates and fixes to the oldmem
reading code.
- Rework the cpufeature implementation to allow for various CPU feature
  indications, no longer limited to hardware capabilities but also
  covering CPU facilities.
- Use the cpufeature rework to autoload the Ultravisor module when CPU
  facility 158 is available (the pattern is sketched after this list).
- Add an ELF note type for the encrypted CPU state of a protected
  virtual CPU. The zgetdump tool from the s390-tools package will
  decrypt the CPU state using a Customer Communication Key and
  overwrite the respective notes to make the data accessible to crash
  and other debugging tools.
- Use vzalloc() instead of vmalloc() + memset() in ChaCha20 crypto
test.
- Fix incorrect recovery of the kretprobe-modified return address in
  stacktraces.
- Switch the NMI handler to use generic irqentry_nmi_enter() and
irqentry_nmi_exit() helper functions.
- Rework the cryptographic Adjunct Processors (AP) pass-through design
to support dynamic changes to the AP matrix of a running guest as
well as to implement more of the AP architecture.
- Minor boot code cleanups.
- Grammar and typo fixes to hmcdrv and tape drivers.
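The cpufeature rework mentioned above shows up in drivers as a single macro at the module boundary. Below is a minimal sketch of that pattern, assuming the S390_CPU_FEATURE_UV constant and module_cpu_feature_match() usage seen in the uvdevice.c hunk of the diff; the "uv_demo" misc device and all uv_demo_* names are purely illustrative, not part of this series:

```c
// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch only: module_cpu_feature_match() replaces module_init() and
 * additionally registers a CPU-feature device table, so the module can
 * be autoloaded on machines where the feature (here: facility 158 /
 * Ultravisor) is available, and its init function is skipped otherwise.
 */
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/cpufeature.h>

static struct miscdevice uv_demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "uv_demo",		/* hypothetical device name */
};

static int __init uv_demo_init(void)
{
	/*
	 * No explicit test_facility(158) check is needed here: the
	 * wrapper generated by module_cpu_feature_match() bails out
	 * with -ENODEV before calling us if the feature is absent.
	 */
	return misc_register(&uv_demo_dev);
}

static void __exit uv_demo_exit(void)
{
	misc_deregister(&uv_demo_dev);
}

module_cpu_feature_match(S390_CPU_FEATURE_UV, uv_demo_init);
module_exit(uv_demo_exit);

MODULE_LICENSE("GPL");
```

This is exactly the shape of the s390-trng.c, pkey_api.c and uvdevice.c changes in the diff: drop the hand-rolled facility check and let the cpufeature machinery gate both module loading and initialization.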
* tag 's390-5.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (46 commits)
Revert "s390/smp: enforce lowcore protection on CPU restart"
Revert "s390/smp: rework absolute lowcore access"
Revert "s390/smp,ptdump: add absolute lowcore markers"
s390/unwind: fix fgraph return address recovery
s390/nmi: use irqentry_nmi_enter()/irqentry_nmi_exit()
s390: add ELF note type for encrypted CPU state of a PV VCPU
s390/smp,ptdump: add absolute lowcore markers
s390/smp: rework absolute lowcore access
s390/setup: rearrange absolute lowcore initialization
s390/boot: cleanup adjust_to_uv_max() function
s390/smp: enforce lowcore protection on CPU restart
s390/tape: fix comment typo
s390/hmcdrv: fix Kconfig "its" grammar
s390/docs: fix warnings for vfio_ap driver doc
s390/docs: fix warnings for vfio_ap driver lock usage doc
s390/crash: support multi-segment iterators
s390/crash: use static swap buffer for copy_to_user_real()
s390/crash: move copy_to_user_real() to crash_dump.c
s390/zcore: fix race when reading from hardware system area
s390/crash: fix incorrect number of bytes to copy to user space
...
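Several of the crash/zcore commits listed above converge on one pattern: replace separate user-space and kernel copy helpers with a single routine that fills an iov_iter, as the new memcpy_hsa_iter()/memcpy_hsa_kernel() pair in the zcore.c hunk below does. Here is a minimal sketch of that shape under stated assumptions: the demo_* names and the static demo_source[] buffer (standing in for the HSA bounce buffer) are hypothetical, and the direction flag passed to the iterator constructors is READ, which marks the iterator as the destination of the copy.

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch of the iov_iter copy pattern; not the kernel's zcore code. */
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/uio.h>
#include <linux/errno.h>

static char demo_source[4096];		/* stand-in for the bounce buffer */

/* Copy @count bytes starting at offset @src into @iter; return bytes copied. */
static size_t demo_copy_iter(struct iov_iter *iter, unsigned long src,
			     size_t count)
{
	if (src >= sizeof(demo_source))
		return 0;
	count = min_t(size_t, count, sizeof(demo_source) - src);
	return copy_to_iter(demo_source + src, count, iter);
}

/* Kernel-destination wrapper, same shape as the reworked memcpy_hsa_kernel(). */
static int demo_copy_kernel(void *dst, unsigned long src, size_t count)
{
	struct kvec kvec = { .iov_base = dst, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kvec, 1, count);
	if (demo_copy_iter(&iter, src, count) < count)
		return -EIO;
	return 0;
}

/* User-destination wrapper: only the iterator construction differs. */
static int demo_copy_user(void __user *dst, unsigned long src, size_t count)
{
	struct iovec iov;
	struct iov_iter iter;

	if (import_single_range(READ, dst, count, &iov, &iter))
		return -EFAULT;
	if (demo_copy_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}
```

The payoff is that callers needing a user-space destination, a kernel destination, or a genuinely multi-segment iterator (as in the copy_oldmem_page() rework) all funnel through the same worker.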
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/s390-trng.c    |    2
-rw-r--r--  drivers/s390/char/Kconfig             |    2
-rw-r--r--  drivers/s390/char/tape_34xx.c         |    2
-rw-r--r--  drivers/s390/char/uvdevice.c          |    5
-rw-r--r--  drivers/s390/char/zcore.c             |   55
-rw-r--r--  drivers/s390/crypto/ap_bus.c          |   31
-rw-r--r--  drivers/s390/crypto/pkey_api.c        |    2
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c     |  124
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c     | 1441
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h |   47
10 files changed, 1231 insertions, 480 deletions
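Most of the churn in the diff below is the vfio_ap pass-through rework, which replaces the single matrix_dev->lock with two mutexes (guests_lock and mdevs_lock) and nests a guest's kvm->lock between them so the APCB of a running guest can be updated on the fly. A minimal sketch of that lock-ordering discipline follows; the demo_* types are hypothetical stand-ins for the driver's structures:

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch of the guests_lock -> kvm->lock -> mdevs_lock ordering. */
#include <linux/mutex.h>

struct demo_guest {
	struct mutex lock;		/* stands in for kvm->lock */
};

struct demo_matrix_dev {
	struct mutex guests_lock;	/* guards use of the KVM pointer */
	struct mutex mdevs_lock;	/* guards per-mdev state */
};

static struct demo_matrix_dev demo_dev = {
	.guests_lock = __MUTEX_INITIALIZER(demo_dev.guests_lock),
	.mdevs_lock  = __MUTEX_INITIALIZER(demo_dev.mdevs_lock),
};

/* Acquire in the documented order: guests_lock -> kvm->lock -> mdevs_lock. */
static void demo_get_update_locks(struct demo_guest *guest)
{
	mutex_lock(&demo_dev.guests_lock);
	if (guest)			/* a guest may not be attached yet */
		mutex_lock(&guest->lock);
	mutex_lock(&demo_dev.mdevs_lock);
}

/* Release in strictly reverse order to avoid lock inversion. */
static void demo_release_update_locks(struct demo_guest *guest)
{
	mutex_unlock(&demo_dev.mdevs_lock);
	if (guest)
		mutex_unlock(&guest->lock);
	mutex_unlock(&demo_dev.guests_lock);
}
```

Taking the locks in this fixed order, and skipping the KVM lock when no guest is attached, mirrors the get_update_locks_for_kvm()/release_update_locks_for_kvm() helpers added to vfio_ap_ops.c in the diff below.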
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c index 488808dc17a2..795853dfc46b 100644 --- a/drivers/char/hw_random/s390-trng.c +++ b/drivers/char/hw_random/s390-trng.c @@ -252,5 +252,5 @@ static void __exit trng_exit(void) trng_debug_exit(); } -module_cpu_feature_match(MSA, trng_init); +module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init); module_exit(trng_exit); diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 57f41efb8043..7d1749b0d378 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -89,7 +89,7 @@ config HMC_DRV Management Console (HMC) drive CD/DVD-ROM. It is available as a module, called 'hmcdrv', and also as kernel built-in. There is one optional parameter for this module: cachesize=N, which modifies the - transfer cache size from it's default value 0.5MB to N bytes. If N + transfer cache size from its default value 0.5MB to N bytes. If N is zero, then no caching is performed. config SCLP_OFB diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 38cc1565d6ae..751945fb6793 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -548,7 +548,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request, case 0x2e: /* * Not capable. This indicates either that the drive fails - * reading the format id mark or that that format specified + * reading the format id mark or that format specified * is not supported by the drive. */ dev_warn (&device->cdev->dev, "The tape unit cannot process " diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c index 66505d7166a6..1d40457c7b10 100644 --- a/drivers/s390/char/uvdevice.c +++ b/drivers/s390/char/uvdevice.c @@ -27,6 +27,7 @@ #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/cpufeature.h> #include <asm/uvdevice.h> #include <asm/uv.h> @@ -244,12 +245,10 @@ static void __exit uvio_dev_exit(void) static int __init uvio_dev_init(void) { - if (!test_facility(158)) - return -ENXIO; return misc_register(&uvio_dev_miscdev); } -module_init(uvio_dev_init); +module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init); module_exit(uvio_dev_exit); MODULE_AUTHOR("IBM Corporation"); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 516783ba950f..f6da215ccf9f 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -17,6 +17,7 @@ #include <linux/debugfs.h> #include <linux/panic_notifier.h> #include <linux/reboot.h> +#include <linux/uio.h> #include <asm/asm-offsets.h> #include <asm/ipl.h> @@ -50,36 +51,41 @@ static struct dentry *zcore_reipl_file; static struct dentry *zcore_hsa_file; static struct ipl_parameter_block *zcore_ipl_block; +static DEFINE_MUTEX(hsa_buf_mutex); static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE); /* - * Copy memory from HSA to user memory (not reentrant): + * Copy memory from HSA to iterator (not reentrant): * - * @dest: User buffer where memory should be copied to + * @iter: Iterator where memory should be copied to * @src: Start address within HSA where data should be copied * @count: Size of buffer, which should be copied */ -int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) +size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count) { - unsigned long offset, bytes; + size_t bytes, copied, res = 0; + unsigned long offset; if (!hsa_available) - return -ENODATA; + return 0; + mutex_lock(&hsa_buf_mutex); while (count) { if 
(sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) { TRACE("sclp_sdias_copy() failed\n"); - return -EIO; + break; } offset = src % PAGE_SIZE; bytes = min(PAGE_SIZE - offset, count); - if (copy_to_user(dest, hsa_buf + offset, bytes)) - return -EFAULT; - src += bytes; - dest += bytes; - count -= bytes; + copied = copy_to_iter(hsa_buf + offset, bytes, iter); + count -= copied; + src += copied; + res += copied; + if (copied < bytes) + break; } - return 0; + mutex_unlock(&hsa_buf_mutex); + return res; } /* @@ -89,25 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) * @src: Start address within HSA where data should be copied * @count: Size of buffer, which should be copied */ -int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) +static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count) { - unsigned long offset, bytes; + struct iov_iter iter; + struct kvec kvec; - if (!hsa_available) - return -ENODATA; - - while (count) { - if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) { - TRACE("sclp_sdias_copy() failed\n"); - return -EIO; - } - offset = src % PAGE_SIZE; - bytes = min(PAGE_SIZE - offset, count); - memcpy(dest, hsa_buf + offset, bytes); - src += bytes; - dest += bytes; - count -= bytes; - } + kvec.iov_base = dst; + kvec.iov_len = count; + iov_iter_kvec(&iter, WRITE, &kvec, 1, count); + if (memcpy_hsa_iter(&iter, src, count) < count) + return -EIO; return 0; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 95cfdeab5baf..8f1d1cf23d44 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -838,6 +838,17 @@ static void ap_bus_revise_bindings(void) bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved); } +/** + * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the + * default host driver or not. + * @card: the APID of the adapter card to check + * @queue: the APQI of the queue to check + * + * Note: the ap_perms_mutex must be locked by the caller of this function. + * + * Return: an int specifying whether the AP adapter is reserved for the host (1) + * or not (0). + */ int ap_owned_by_def_drv(int card, int queue) { int rc = 0; @@ -845,25 +856,31 @@ int ap_owned_by_def_drv(int card, int queue) if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS) return -EINVAL; - mutex_lock(&ap_perms_mutex); - if (test_bit_inv(card, ap_perms.apm) && test_bit_inv(queue, ap_perms.aqm)) rc = 1; - mutex_unlock(&ap_perms_mutex); - return rc; } EXPORT_SYMBOL(ap_owned_by_def_drv); +/** + * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in + * a set is reserved for the host drivers + * or not. + * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check + * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check + * + * Note: the ap_perms_mutex must be locked by the caller of this function. 
+ * + * Return: an int specifying whether each APQN is reserved for the host (1) or + * not (0) + */ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm, unsigned long *aqm) { int card, queue, rc = 0; - mutex_lock(&ap_perms_mutex); - for (card = 0; !rc && card < AP_DEVICES; card++) if (test_bit_inv(card, apm) && test_bit_inv(card, ap_perms.apm)) @@ -872,8 +889,6 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm, test_bit_inv(queue, ap_perms.aqm)) rc = 1; - mutex_unlock(&ap_perms_mutex); - return rc; } EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 7329caa7d467..5a05d1cdfec2 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -2115,5 +2115,5 @@ static void __exit pkey_exit(void) pkey_debug_exit(); } -module_cpu_feature_match(MSA, pkey_init); +module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); module_exit(pkey_exit); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 4ac9c6521ec1..f43cfeabd2cc 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -18,9 +18,6 @@ #define VFIO_AP_ROOT_NAME "vfio_ap" #define VFIO_AP_DEV_NAME "matrix" -#define AP_QUEUE_ASSIGNED "assigned" -#define AP_QUEUE_UNASSIGNED "unassigned" -#define AP_QUEUE_IN_USE "in use" MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018"); @@ -46,120 +43,12 @@ static struct ap_device_id ap_queue_ids[] = { { /* end of sibling */ }, }; -static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) -{ - struct ap_matrix_mdev *matrix_mdev; - unsigned long apid = AP_QID_CARD(q->apqn); - unsigned long apqi = AP_QID_QUEUE(q->apqn); - - list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { - if (test_bit_inv(apid, matrix_mdev->matrix.apm) && - test_bit_inv(apqi, matrix_mdev->matrix.aqm)) - return matrix_mdev; - } - - return NULL; -} - -static ssize_t status_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t nchars = 0; - struct vfio_ap_queue *q; - struct ap_matrix_mdev *matrix_mdev; - struct ap_device *apdev = to_ap_dev(dev); - - mutex_lock(&matrix_dev->lock); - q = dev_get_drvdata(&apdev->device); - matrix_mdev = vfio_ap_mdev_for_queue(q); - - if (matrix_mdev) { - if (matrix_mdev->kvm) - nchars = scnprintf(buf, PAGE_SIZE, "%s\n", - AP_QUEUE_IN_USE); - else - nchars = scnprintf(buf, PAGE_SIZE, "%s\n", - AP_QUEUE_ASSIGNED); - } else { - nchars = scnprintf(buf, PAGE_SIZE, "%s\n", - AP_QUEUE_UNASSIGNED); - } - - mutex_unlock(&matrix_dev->lock); - - return nchars; -} - -static DEVICE_ATTR_RO(status); - -static struct attribute *vfio_queue_attrs[] = { - &dev_attr_status.attr, - NULL, -}; - -static const struct attribute_group vfio_queue_attr_group = { - .attrs = vfio_queue_attrs, -}; - -/** - * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it - * with the device as driver_data. - * - * @apdev: the AP device being probed - * - * Return: returns 0 if the probe succeeded; otherwise, returns an error if - * storage could not be allocated for a vfio_ap_queue object or the - * sysfs 'status' attribute could not be created for the queue device. 
- */ -static int vfio_ap_queue_dev_probe(struct ap_device *apdev) -{ - int ret; - struct vfio_ap_queue *q; - - q = kzalloc(sizeof(*q), GFP_KERNEL); - if (!q) - return -ENOMEM; - - mutex_lock(&matrix_dev->lock); - dev_set_drvdata(&apdev->device, q); - q->apqn = to_ap_queue(&apdev->device)->qid; - q->saved_isc = VFIO_AP_ISC_INVALID; - - ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group); - if (ret) { - dev_set_drvdata(&apdev->device, NULL); - kfree(q); - } - - mutex_unlock(&matrix_dev->lock); - - return ret; -} - -/** - * vfio_ap_queue_dev_remove: Free the associated vfio_ap_queue structure. - * - * @apdev: the AP device being removed - * - * Takes the matrix lock to avoid actions on this device while doing the remove. - */ -static void vfio_ap_queue_dev_remove(struct ap_device *apdev) -{ - struct vfio_ap_queue *q; - - mutex_lock(&matrix_dev->lock); - sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group); - q = dev_get_drvdata(&apdev->device); - vfio_ap_mdev_reset_queue(q, 1); - dev_set_drvdata(&apdev->device, NULL); - kfree(q); - mutex_unlock(&matrix_dev->lock); -} - static struct ap_driver vfio_ap_drv = { - .probe = vfio_ap_queue_dev_probe, - .remove = vfio_ap_queue_dev_remove, + .probe = vfio_ap_mdev_probe_queue, + .remove = vfio_ap_mdev_remove_queue, + .in_use = vfio_ap_mdev_resource_in_use, + .on_config_changed = vfio_ap_on_cfg_changed, + .on_scan_complete = vfio_ap_on_scan_complete, .ids = ap_queue_ids, }; @@ -212,8 +101,9 @@ static int vfio_ap_matrix_dev_create(void) goto matrix_alloc_err; } - mutex_init(&matrix_dev->lock); + mutex_init(&matrix_dev->mdevs_lock); INIT_LIST_HEAD(&matrix_dev->mdev_list); + mutex_init(&matrix_dev->guests_lock); dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME); matrix_dev->device.parent = root_device; diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 75cd92c291e3..6c8c41fac4e1 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -26,44 +26,193 @@ #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough" #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" -static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev); +#define AP_QUEUE_ASSIGNED "assigned" +#define AP_QUEUE_UNASSIGNED "unassigned" +#define AP_QUEUE_IN_USE "in use" + +static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable); static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); static const struct vfio_device_ops vfio_ap_matrix_dev_ops; +static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry); -static int match_apqn(struct device *dev, const void *data) +/** + * get_update_locks_for_kvm: Acquire the locks required to dynamically update a + * KVM guest's APCB in the proper order. + * + * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB. + * + * The proper locking order is: + * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM + * guest's APCB. + * 2. kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev + * + * Note: If @kvm is NULL, the KVM lock will not be taken. + */ +static inline void get_update_locks_for_kvm(struct kvm *kvm) { - struct vfio_ap_queue *q = dev_get_drvdata(dev); + mutex_lock(&matrix_dev->guests_lock); + if (kvm) + mutex_lock(&kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); +} - return (q->apqn == *(int *)(data)) ? 
1 : 0; +/** + * release_update_locks_for_kvm: Release the locks used to dynamically update a + * KVM guest's APCB in the proper order. + * + * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB. + * + * The proper unlocking order is: + * 1. matrix_dev->mdevs_lock + * 2. kvm->lock + * 3. matrix_dev->guests_lock + * + * Note: If @kvm is NULL, the KVM lock will not be released. + */ +static inline void release_update_locks_for_kvm(struct kvm *kvm) +{ + mutex_unlock(&matrix_dev->mdevs_lock); + if (kvm) + mutex_unlock(&kvm->lock); + mutex_unlock(&matrix_dev->guests_lock); } /** - * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list - * @matrix_mdev: the associated mediated matrix - * @apqn: The queue APQN + * get_update_locks_for_mdev: Acquire the locks required to dynamically update a + * KVM guest's APCB in the proper order. + * + * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP + * configuration data to use to update a KVM guest's APCB. * - * Retrieve a queue with a specific APQN from the list of the - * devices of the vfio_ap_drv. - * Verify that the APID and the APQI are set in the matrix. + * The proper locking order is: + * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM + * guest's APCB. + * 2. matrix_mdev->kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev * - * Return: the pointer to the associated vfio_ap_queue + * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM + * lock will not be taken. */ -static struct vfio_ap_queue *vfio_ap_get_queue( +static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) +{ + mutex_lock(&matrix_dev->guests_lock); + if (matrix_mdev && matrix_mdev->kvm) + mutex_lock(&matrix_mdev->kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); +} + +/** + * release_update_locks_for_mdev: Release the locks used to dynamically update a + * KVM guest's APCB in the proper order. + * + * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP + * configuration data to use to update a KVM guest's APCB. + * + * The proper unlocking order is: + * 1. matrix_dev->mdevs_lock + * 2. matrix_mdev->kvm->lock + * 3. matrix_dev->guests_lock + * + * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM + * lock will not be released. + */ +static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) +{ + mutex_unlock(&matrix_dev->mdevs_lock); + if (matrix_mdev && matrix_mdev->kvm) + mutex_unlock(&matrix_mdev->kvm->lock); + mutex_unlock(&matrix_dev->guests_lock); +} + +/** + * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and + * acquire the locks required to update the APCB of + * the KVM guest to which the mdev is attached. + * + * @apqn: the APQN of a queue device. + * + * The proper locking order is: + * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM + * guest's APCB. + * 2. matrix_mdev->kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev + * + * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock + * will not be taken. + * + * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn + * is not assigned to an ap_matrix_mdev. 
+ */ +static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn) +{ + struct ap_matrix_mdev *matrix_mdev; + + mutex_lock(&matrix_dev->guests_lock); + + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) && + test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) { + if (matrix_mdev->kvm) + mutex_lock(&matrix_mdev->kvm->lock); + + mutex_lock(&matrix_dev->mdevs_lock); + + return matrix_mdev; + } + } + + mutex_lock(&matrix_dev->mdevs_lock); + + return NULL; +} + +/** + * get_update_locks_for_queue: get the locks required to update the APCB of the + * KVM guest to which the matrix mdev linked to a + * vfio_ap_queue object is attached. + * + * @q: a pointer to a vfio_ap_queue object. + * + * The proper locking order is: + * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a + * KVM guest's APCB. + * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB + * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev + * + * Note: if @queue is not linked to an ap_matrix_mdev object, the KVM lock + * will not be taken. + */ +static inline void get_update_locks_for_queue(struct vfio_ap_queue *q) +{ + mutex_lock(&matrix_dev->guests_lock); + if (q->matrix_mdev && q->matrix_mdev->kvm) + mutex_lock(&q->matrix_mdev->kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); +} + +/** + * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a + * hash table of queues assigned to a matrix mdev + * @matrix_mdev: the matrix mdev + * @apqn: The APQN of a queue device + * + * Return: the pointer to the vfio_ap_queue struct representing the queue or + * NULL if the queue is not assigned to @matrix_mdev + */ +static struct vfio_ap_queue *vfio_ap_mdev_get_queue( struct ap_matrix_mdev *matrix_mdev, int apqn) { struct vfio_ap_queue *q; - if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) - return NULL; - if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) - return NULL; - - q = vfio_ap_find_queue(apqn); - if (q) - q->matrix_mdev = matrix_mdev; + hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode, + apqn) { + if (q && q->apqn == apqn) + return q; + } - return q; + return NULL; } /** @@ -180,7 +329,6 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) status.response_code); end_free: vfio_ap_free_aqic_resources(q); - q->matrix_mdev = NULL; return status; } @@ -399,10 +547,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } - mutex_lock(&matrix_dev->lock); + mutex_lock(&matrix_dev->mdevs_lock); + if (!vcpu->kvm->arch.crypto.pqap_hook) { VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n", __func__, apqn); + goto out_unlock; } @@ -418,7 +568,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu) goto out_unlock; } - q = vfio_ap_get_queue(matrix_mdev, apqn); + q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); if (!q) { VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n", __func__, AP_QID_CARD(apqn), @@ -437,7 +587,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu) out_unlock: memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus)); vcpu->run->s.regs.gprs[1] >>= 32; - mutex_unlock(&matrix_dev->lock); + mutex_unlock(&matrix_dev->mdevs_lock); return 0; } @@ -449,6 +599,91 @@ static void vfio_ap_matrix_init(struct ap_config_info *info, matrix->adm_max = info->apxa ? 
info->Nd : 15; } +static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev) +{ + if (matrix_mdev->kvm) + kvm_arch_crypto_set_masks(matrix_mdev->kvm, + matrix_mdev->shadow_apcb.apm, + matrix_mdev->shadow_apcb.aqm, + matrix_mdev->shadow_apcb.adm); +} + +static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) +{ + DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS); + + bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS); + bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm, + (unsigned long *)matrix_dev->info.adm, AP_DOMAINS); + + return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, + AP_DOMAINS); +} + +/* + * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev + * to ensure no queue devices are passed through to + * the guest that are not bound to the vfio_ap + * device driver. + * + * @matrix_mdev: the matrix mdev whose matrix is to be filtered. + * + * Note: If an APQN referencing a queue device that is not bound to the vfio_ap + * driver, its APID will be filtered from the guest's APCB. The matrix + * structure precludes filtering an individual APQN, so its APID will be + * filtered. + * + * Return: a boolean value indicating whether the KVM guest's APCB was changed + * by the filtering or not. + */ +static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, + struct ap_matrix_mdev *matrix_mdev) +{ + unsigned long apid, apqi, apqn; + DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES); + DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS); + struct vfio_ap_queue *q; + + bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES); + bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS); + vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); + + /* + * Copy the adapters, domains and control domains to the shadow_apcb + * from the matrix mdev, but only those that are assigned to the host's + * AP configuration. + */ + bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm, + (unsigned long *)matrix_dev->info.apm, AP_DEVICES); + bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm, + (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS); + + for_each_set_bit_inv(apid, apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + /* + * If the APQN is not bound to the vfio_ap device + * driver, then we can't assign it to the guest's + * AP configuration. The AP architecture won't + * allow filtering of a single APQN, so let's filter + * the APID since an adapter represents a physical + * hardware device. 
+ */ + apqn = AP_MKQID(apid, apqi); + q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); + if (!q || q->reset_rc) { + clear_bit_inv(apid, + matrix_mdev->shadow_apcb.apm); + break; + } + } + } + + return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, + AP_DEVICES) || + !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, + AP_DOMAINS); +} + static int vfio_ap_mdev_probe(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev; @@ -468,20 +703,19 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev) matrix_mdev->mdev = mdev; vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); matrix_mdev->pqap_hook = handle_pqap; - mutex_lock(&matrix_dev->lock); - list_add(&matrix_mdev->node, &matrix_dev->mdev_list); - mutex_unlock(&matrix_dev->lock); + vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); + hash_init(matrix_mdev->qtable.queues); ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); if (ret) goto err_list; dev_set_drvdata(&mdev->dev, matrix_mdev); + mutex_lock(&matrix_dev->mdevs_lock); + list_add(&matrix_mdev->node, &matrix_dev->mdev_list); + mutex_unlock(&matrix_dev->mdevs_lock); return 0; err_list: - mutex_lock(&matrix_dev->lock); - list_del(&matrix_mdev->node); - mutex_unlock(&matrix_dev->lock); vfio_uninit_group_dev(&matrix_mdev->vdev); kfree(matrix_mdev); err_dec_available: @@ -489,16 +723,62 @@ err_dec_available: return ret; } +static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev, + struct vfio_ap_queue *q) +{ + if (q) { + q->matrix_mdev = matrix_mdev; + hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); + } +} + +static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn) +{ + struct vfio_ap_queue *q; + + q = vfio_ap_find_queue(apqn); + vfio_ap_mdev_link_queue(matrix_mdev, q); +} + +static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q) +{ + hash_del(&q->mdev_qnode); +} + +static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q) +{ + q->matrix_mdev = NULL; +} + +static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev) +{ + struct vfio_ap_queue *q; + unsigned long apid, apqi; + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + AP_DOMAINS) { + q = vfio_ap_mdev_get_queue(matrix_mdev, + AP_MKQID(apid, apqi)); + if (q) + q->matrix_mdev = NULL; + } + } +} + static void vfio_ap_mdev_remove(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); vfio_unregister_group_dev(&matrix_mdev->vdev); - mutex_lock(&matrix_dev->lock); - vfio_ap_mdev_reset_queues(matrix_mdev); + mutex_lock(&matrix_dev->guests_lock); + mutex_lock(&matrix_dev->mdevs_lock); + vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); + vfio_ap_mdev_unlink_fr_queues(matrix_mdev); list_del(&matrix_mdev->node); - mutex_unlock(&matrix_dev->lock); + mutex_unlock(&matrix_dev->mdevs_lock); + mutex_unlock(&matrix_dev->guests_lock); vfio_uninit_group_dev(&matrix_mdev->vdev); kfree(matrix_mdev); atomic_inc(&matrix_dev->available_instances); @@ -547,141 +827,48 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = { NULL, }; -struct vfio_ap_queue_reserved { - unsigned long *apid; - unsigned long *apqi; - bool reserved; -}; +#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ + "already assigned to %s" -/** - * vfio_ap_has_queue - determines if the AP queue containing the target in @data - * - * @dev: an AP queue device - * @data: a struct 
vfio_ap_queue_reserved reference - * - * Flags whether the AP queue device (@dev) has a queue ID containing the APQN, - * apid or apqi specified in @data: - * - * - If @data contains both an apid and apqi value, then @data will be flagged - * as reserved if the APID and APQI fields for the AP queue device matches - * - * - If @data contains only an apid value, @data will be flagged as - * reserved if the APID field in the AP queue device matches - * - * - If @data contains only an apqi value, @data will be flagged as - * reserved if the APQI field in the AP queue device matches - * - * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if - * @data does not contain either an apid or apqi. - */ -static int vfio_ap_has_queue(struct device *dev, void *data) +static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, + unsigned long *apm, + unsigned long *aqm) { - struct vfio_ap_queue_reserved *qres = data; - struct ap_queue *ap_queue = to_ap_queue(dev); - ap_qid_t qid; - unsigned long id; - - if (qres->apid && qres->apqi) { - qid = AP_MKQID(*qres->apid, *qres->apqi); - if (qid == ap_queue->qid) - qres->reserved = true; - } else if (qres->apid && !qres->apqi) { - id = AP_QID_CARD(ap_queue->qid); - if (id == *qres->apid) - qres->reserved = true; - } else if (!qres->apid && qres->apqi) { - id = AP_QID_QUEUE(ap_queue->qid); - if (id == *qres->apqi) - qres->reserved = true; - } else { - return -EINVAL; - } - - return 0; -} - -/** - * vfio_ap_verify_queue_reserved - verifies that the AP queue containing - * @apid or @aqpi is reserved - * - * @apid: an AP adapter ID - * @apqi: an AP queue index - * - * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device - * driver according to the following rules: - * - * - If both @apid and @apqi are not NULL, then there must be an AP queue - * device bound to the vfio_ap driver with the APQN identified by @apid and - * @apqi - * - * - If only @apid is not NULL, then there must be an AP queue device bound - * to the vfio_ap driver with an APQN containing @apid - * - * - If only @apqi is not NULL, then there must be an AP queue device bound - * to the vfio_ap driver with an APQN containing @apqi - * - * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL. 
- */ -static int vfio_ap_verify_queue_reserved(unsigned long *apid, - unsigned long *apqi) -{ - int ret; - struct vfio_ap_queue_reserved qres; - - qres.apid = apid; - qres.apqi = apqi; - qres.reserved = false; - - ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL, - &qres, vfio_ap_has_queue); - if (ret) - return ret; - - if (qres.reserved) - return 0; - - return -EADDRNOTAVAIL; -} - -static int -vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev, - unsigned long apid) -{ - int ret; - unsigned long apqi; - unsigned long nbits = matrix_mdev->matrix.aqm_max + 1; - - if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits) - return vfio_ap_verify_queue_reserved(&apid, NULL); - - for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) { - ret = vfio_ap_verify_queue_reserved(&apid, &apqi); - if (ret) - return ret; - } + unsigned long apid, apqi; + const struct device *dev = mdev_dev(matrix_mdev->mdev); + const char *mdev_name = dev_name(dev); - return 0; + for_each_set_bit_inv(apid, apm, AP_DEVICES) + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) + dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name); } /** - * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured + * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs * - * @matrix_mdev: the mediated matrix device + * @mdev_apm: mask indicating the APIDs of the APQNs to be verified + * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified * - * Verifies that the APQNs derived from the cross product of the AP adapter IDs - * and AP queue indexes comprising the AP matrix are not configured for another + * Verifies that each APQN derived from the Cartesian product of a bitmap of + * AP adapter IDs and AP queue indexes is not configured for any matrix * mediated device. AP queue sharing is not allowed. * - * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE. + * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. */ -static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) +static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, + unsigned long *mdev_aqm) { - struct ap_matrix_mdev *lstdev; + struct ap_matrix_mdev *matrix_mdev; DECLARE_BITMAP(apm, AP_DEVICES); DECLARE_BITMAP(aqm, AP_DOMAINS); - list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) { - if (matrix_mdev == lstdev) + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + /* + * If the input apm and aqm are fields of the matrix_mdev + * object, then move on to the next matrix_mdev. + */ + if (mdev_apm == matrix_mdev->matrix.apm && + mdev_aqm == matrix_mdev->matrix.aqm) continue; memset(apm, 0, sizeof(apm)); @@ -691,14 +878,16 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) * We work on full longs, as we can only exclude the leftover * bits in non-inverse order. The leftover is all zeros. 
*/ - if (!bitmap_and(apm, matrix_mdev->matrix.apm, - lstdev->matrix.apm, AP_DEVICES)) + if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, + AP_DEVICES)) continue; - if (!bitmap_and(aqm, matrix_mdev->matrix.aqm, - lstdev->matrix.aqm, AP_DOMAINS)) + if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, + AP_DOMAINS)) continue; + vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); + return -EADDRINUSE; } @@ -706,6 +895,41 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) } /** + * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are + * not reserved for the default zcrypt driver and + * are not assigned to another mdev. + * + * @matrix_mdev: the mdev to which the APQNs being validated are assigned. + * + * Return: One of the following values: + * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function, + * most likely -EBUSY indicating the ap_perms_mutex lock is already held. + * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the + * zcrypt default driver. + * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev + * o A zero indicating validation succeeded. + */ +static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) +{ + if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm)) + return -EADDRNOTAVAIL; + + return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm); +} + +static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + unsigned long apqi; + + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) + vfio_ap_mdev_link_apqn(matrix_mdev, + AP_MKQID(apid, apqi)); +} + +/** * assign_adapter_store - parses the APID from @buf and sets the * corresponding bit in the mediated matrix device's APM * @@ -734,6 +958,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) * An APQN derived from the cross product of the APID being assigned * and the APQIs previously assigned is being used by another mediated * matrix device + * + * 5. -EAGAIN + * A lock required to validate the mdev's AP configuration could not + * be obtained. */ static ssize_t assign_adapter_store(struct device *dev, struct device_attribute *attr, @@ -741,15 +969,11 @@ static ssize_t assign_adapter_store(struct device *dev, { int ret; unsigned long apid; + DECLARE_BITMAP(apm_delta, AP_DEVICES); struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow assignment of adapter */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + mutex_lock(&ap_perms_mutex); + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apid); if (ret) @@ -760,33 +984,97 @@ static ssize_t assign_adapter_store(struct device *dev, goto done; } - /* - * Set the bit in the AP mask (APM) corresponding to the AP adapter - * number (APID). The bits in the mask, from most significant to least - * significant bit, correspond to APIDs 0-255. 
- */ - ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid); - if (ret) + set_bit_inv(apid, matrix_mdev->matrix.apm); + + ret = vfio_ap_mdev_validate_masks(matrix_mdev); + if (ret) { + clear_bit_inv(apid, matrix_mdev->matrix.apm); goto done; + } - set_bit_inv(apid, matrix_mdev->matrix.apm); + vfio_ap_mdev_link_adapter(matrix_mdev, apid); + memset(apm_delta, 0, sizeof(apm_delta)); + set_bit_inv(apid, apm_delta); - ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); - if (ret) - goto share_err; + if (vfio_ap_mdev_filter_matrix(apm_delta, + matrix_mdev->matrix.aqm, matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); ret = count; - goto done; - -share_err: - clear_bit_inv(apid, matrix_mdev->matrix.apm); done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); + mutex_unlock(&ap_perms_mutex); return ret; } static DEVICE_ATTR_WO(assign_adapter); +static struct vfio_ap_queue +*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid, unsigned long apqi) +{ + struct vfio_ap_queue *q = NULL; + + q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); + /* If the queue is assigned to the matrix mdev, unlink it. */ + if (q) + vfio_ap_unlink_queue_fr_mdev(q); + + return q; +} + +/** + * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned + * adapter from the matrix mdev to which the + * adapter was assigned. + * @matrix_mdev: the matrix mediated device to which the adapter was assigned. + * @apid: the APID of the unassigned adapter. + * @qtable: table for storing queues associated with unassigned adapter. + */ +static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid, + struct ap_queue_table *qtable) +{ + unsigned long apqi; + struct vfio_ap_queue *q; + + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) { + q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); + + if (q && qtable) { + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && + test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) + hash_add(qtable->queues, &q->mdev_qnode, + q->apqn); + } + } +} + +static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + int loop_cursor; + struct vfio_ap_queue *q; + struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL); + + hash_init(qtable->queues); + vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable); + + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) { + clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + } + + vfio_ap_mdev_reset_queues(qtable); + + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + vfio_ap_unlink_mdev_fr_queue(q); + hash_del(&q->mdev_qnode); + } + + kfree(qtable); +} + /** * unassign_adapter_store - parses the APID from @buf and clears the * corresponding bit in the mediated matrix device's APM @@ -810,13 +1098,7 @@ static ssize_t unassign_adapter_store(struct device *dev, unsigned long apid; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow unassignment of adapter */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apid); if (ret) @@ -828,31 +1110,22 @@ static ssize_t unassign_adapter_store(struct device *dev, } clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); + vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid); 
ret = count; done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(unassign_adapter); -static int -vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, - unsigned long apqi) +static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi) { - int ret; unsigned long apid; - unsigned long nbits = matrix_mdev->matrix.apm_max + 1; - - if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits) - return vfio_ap_verify_queue_reserved(NULL, &apqi); - for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) { - ret = vfio_ap_verify_queue_reserved(&apid, &apqi); - if (ret) - return ret; - } - - return 0; + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) + vfio_ap_mdev_link_apqn(matrix_mdev, + AP_MKQID(apid, apqi)); } /** @@ -884,6 +1157,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, * An APQN derived from the cross product of the APQI being assigned * and the APIDs previously assigned is being used by another mediated * matrix device + * + * 5. -EAGAIN + * The lock required to validate the mdev's AP configuration could not + * be obtained. */ static ssize_t assign_domain_store(struct device *dev, struct device_attribute *attr, @@ -891,47 +1168,89 @@ static ssize_t assign_domain_store(struct device *dev, { int ret; unsigned long apqi; + DECLARE_BITMAP(aqm_delta, AP_DOMAINS); struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - unsigned long max_apqi = matrix_mdev->matrix.aqm_max; - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow assignment of domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + mutex_lock(&ap_perms_mutex); + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apqi); if (ret) goto done; - if (apqi > max_apqi) { + + if (apqi > matrix_mdev->matrix.aqm_max) { ret = -ENODEV; goto done; } - ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi); - if (ret) + set_bit_inv(apqi, matrix_mdev->matrix.aqm); + + ret = vfio_ap_mdev_validate_masks(matrix_mdev); + if (ret) { + clear_bit_inv(apqi, matrix_mdev->matrix.aqm); goto done; + } - set_bit_inv(apqi, matrix_mdev->matrix.aqm); + vfio_ap_mdev_link_domain(matrix_mdev, apqi); + memset(aqm_delta, 0, sizeof(aqm_delta)); + set_bit_inv(apqi, aqm_delta); - ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); - if (ret) - goto share_err; + if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta, + matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); ret = count; - goto done; - -share_err: - clear_bit_inv(apqi, matrix_mdev->matrix.aqm); done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); + mutex_unlock(&ap_perms_mutex); return ret; } static DEVICE_ATTR_WO(assign_domain); +static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi, + struct ap_queue_table *qtable) +{ + unsigned long apid; + struct vfio_ap_queue *q; + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { + q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); + + if (q && qtable) { + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && + test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) + hash_add(qtable->queues, &q->mdev_qnode, + q->apqn); + } + } +} + +static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi) +{ + int loop_cursor; + struct vfio_ap_queue *q; + struct 
ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL); + + hash_init(qtable->queues); + vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable); + + if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { + clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm); + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + } + + vfio_ap_mdev_reset_queues(qtable); + + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + vfio_ap_unlink_mdev_fr_queue(q); + hash_del(&q->mdev_qnode); + } + + kfree(qtable); +} /** * unassign_domain_store - parses the APQI from @buf and clears the @@ -956,13 +1275,7 @@ static ssize_t unassign_domain_store(struct device *dev, unsigned long apqi; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow unassignment of domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apqi); if (ret) @@ -974,10 +1287,11 @@ static ssize_t unassign_domain_store(struct device *dev, } clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); + vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi); ret = count; done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(unassign_domain); @@ -1004,13 +1318,7 @@ static ssize_t assign_control_domain_store(struct device *dev, unsigned long id; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&matrix_dev->lock); - - /* If the KVM guest is running, disallow assignment of control domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &id); if (ret) @@ -1027,9 +1335,12 @@ static ssize_t assign_control_domain_store(struct device *dev, * number of control domains that can be assigned. 
*/ set_bit_inv(id, matrix_mdev->matrix.adm); + if (vfio_ap_mdev_filter_cdoms(matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + ret = count; done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(assign_control_domain); @@ -1055,28 +1366,28 @@ static ssize_t unassign_control_domain_store(struct device *dev, int ret; unsigned long domid; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - unsigned long max_domid = matrix_mdev->matrix.adm_max; - mutex_lock(&matrix_dev->lock); - - /* If a KVM guest is running, disallow unassignment of control domain */ - if (matrix_mdev->kvm) { - ret = -EBUSY; - goto done; - } + get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &domid); if (ret) goto done; - if (domid > max_domid) { + + if (domid > matrix_mdev->matrix.adm_max) { ret = -ENODEV; goto done; } clear_bit_inv(domid, matrix_mdev->matrix.adm); + + if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) { + clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm); + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + } + ret = count; done: - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_mdev(matrix_mdev); return ret; } static DEVICE_ATTR_WO(unassign_control_domain); @@ -1092,40 +1403,36 @@ static ssize_t control_domains_show(struct device *dev, struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); unsigned long max_domid = matrix_mdev->matrix.adm_max; - mutex_lock(&matrix_dev->lock); + mutex_lock(&matrix_dev->mdevs_lock); for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) { n = sprintf(bufpos, "%04lx\n", id); bufpos += n; nchars += n; } - mutex_unlock(&matrix_dev->lock); + mutex_unlock(&matrix_dev->mdevs_lock); return nchars; } static DEVICE_ATTR_RO(control_domains); -static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf) { - struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); char *bufpos = buf; unsigned long apid; unsigned long apqi; unsigned long apid1; unsigned long apqi1; - unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1; - unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1; + unsigned long napm_bits = matrix->apm_max + 1; + unsigned long naqm_bits = matrix->aqm_max + 1; int nchars = 0; int n; - apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits); - apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits); - - mutex_lock(&matrix_dev->lock); + apid1 = find_first_bit_inv(matrix->apm, napm_bits); + apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits); if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) { - for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { - for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + for_each_set_bit_inv(apid, matrix->apm, napm_bits) { + for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) { n = sprintf(bufpos, "%02lx.%04lx\n", apid, apqi); @@ -1134,25 +1441,50 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, } } } else if (apid1 < napm_bits) { - for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { + for_each_set_bit_inv(apid, matrix->apm, napm_bits) { n = sprintf(bufpos, "%02lx.\n", apid); bufpos += n; nchars += n; } } else if (apqi1 < naqm_bits) { - for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) { + for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) { n = sprintf(bufpos, ".%04lx\n", apqi); bufpos += n; nchars += n; } } - 
mutex_unlock(&matrix_dev->lock); + return nchars; +} + +static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + ssize_t nchars; + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); + + mutex_lock(&matrix_dev->mdevs_lock); + nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf); + mutex_unlock(&matrix_dev->mdevs_lock); return nchars; } static DEVICE_ATTR_RO(matrix); +static ssize_t guest_matrix_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t nchars; + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); + + mutex_lock(&matrix_dev->mdevs_lock); + nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf); + mutex_unlock(&matrix_dev->mdevs_lock); + + return nchars; +} +static DEVICE_ATTR_RO(guest_matrix); + static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_assign_adapter.attr, &dev_attr_unassign_adapter.attr, @@ -1162,6 +1494,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_unassign_control_domain.attr, &dev_attr_control_domains.attr, &dev_attr_matrix.attr, + &dev_attr_guest_matrix.attr, NULL, }; @@ -1194,26 +1527,20 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; up_write(&kvm->arch.crypto.pqap_hook_rwsem); - mutex_lock(&kvm->lock); - mutex_lock(&matrix_dev->lock); + get_update_locks_for_kvm(kvm); list_for_each_entry(m, &matrix_dev->mdev_list, node) { if (m != matrix_mdev && m->kvm == kvm) { - mutex_unlock(&kvm->lock); - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_kvm(kvm); return -EPERM; } } kvm_get_kvm(kvm); matrix_mdev->kvm = kvm; - kvm_arch_crypto_set_masks(kvm, - matrix_mdev->matrix.apm, - matrix_mdev->matrix.aqm, - matrix_mdev->matrix.adm); + vfio_ap_mdev_update_guest_apcb(matrix_mdev); - mutex_unlock(&kvm->lock); - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_kvm(kvm); } return 0; @@ -1243,36 +1570,36 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) kvm->arch.crypto.pqap_hook = NULL; up_write(&kvm->arch.crypto.pqap_hook_rwsem); - mutex_lock(&kvm->lock); - mutex_lock(&matrix_dev->lock); + get_update_locks_for_kvm(kvm); kvm_arch_crypto_clear_masks(kvm); - vfio_ap_mdev_reset_queues(matrix_mdev); + vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); kvm_put_kvm(kvm); matrix_mdev->kvm = NULL; - mutex_unlock(&kvm->lock); - mutex_unlock(&matrix_dev->lock); + release_update_locks_for_kvm(kvm); } } static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) { - struct device *dev; + struct ap_queue *queue; struct vfio_ap_queue *q = NULL; - dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, - &apqn, match_apqn); - if (dev) { - q = dev_get_drvdata(dev); - put_device(dev); - } + queue = ap_get_qdev(apqn); + if (!queue) + return NULL; + + if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver) + q = dev_get_drvdata(&queue->ap_dev.device); + + put_device(&queue->ap_dev.device); return q; } -int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, - unsigned int retry) +static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, + unsigned int retry) { struct ap_queue_status status; int ret; @@ -1280,9 +1607,9 @@ int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, if (!q) return 0; - retry_zapq: status = ap_zapq(q->apqn); + q->reset_rc = status.response_code; switch (status.response_code) { case AP_RESPONSE_NORMAL: ret = 0; @@ -1297,12 +1624,17 @@ retry_zapq: case AP_RESPONSE_Q_NOT_AVAIL: case AP_RESPONSE_DECONFIGURED: 
case AP_RESPONSE_CHECKSTOPPED: - WARN_ON_ONCE(status.irq_enabled); + WARN_ONCE(status.irq_enabled, + "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled", + AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn), + status.response_code); ret = -EBUSY; goto free_resources; default: /* things are really broken, give up */ - WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n", + WARN(true, + "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n", + AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn), status.response_code); return -EIO; } @@ -1314,7 +1646,8 @@ retry_zapq: msleep(20); status = ap_tapq(q->apqn, NULL); } - WARN_ON_ONCE(retry2 <= 0); + WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x", + AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn)); free_resources: vfio_ap_free_aqic_resources(q); @@ -1322,27 +1655,20 @@ free_resources: return ret; } -static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) +static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable) { - int ret; - int rc = 0; - unsigned long apid, apqi; + int ret, loop_cursor, rc = 0; struct vfio_ap_queue *q; - for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, - matrix_mdev->matrix.apm_max + 1) { - for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, - matrix_mdev->matrix.aqm_max + 1) { - q = vfio_ap_find_queue(AP_MKQID(apid, apqi)); - ret = vfio_ap_mdev_reset_queue(q, 1); - /* - * Regardless whether a queue turns out to be busy, or - * is not operational, we need to continue resetting - * the remaining queues. - */ - if (ret) - rc = ret; - } + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + ret = vfio_ap_mdev_reset_queue(q, 1); + /* + * Regardless whether a queue turns out to be busy, or + * is not operational, we need to continue resetting + * the remaining queues. 
+ */ + if (ret) + rc = ret; } return rc; @@ -1394,23 +1720,79 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, container_of(vdev, struct ap_matrix_mdev, vdev); int ret; - mutex_lock(&matrix_dev->lock); + mutex_lock(&matrix_dev->mdevs_lock); switch (cmd) { case VFIO_DEVICE_GET_INFO: ret = vfio_ap_mdev_get_device_info(arg); break; case VFIO_DEVICE_RESET: - ret = vfio_ap_mdev_reset_queues(matrix_mdev); + ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); break; default: ret = -EOPNOTSUPP; break; } - mutex_unlock(&matrix_dev->lock); + mutex_unlock(&matrix_dev->mdevs_lock); return ret; } +static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) +{ + struct ap_matrix_mdev *matrix_mdev; + unsigned long apid = AP_QID_CARD(q->apqn); + unsigned long apqi = AP_QID_QUEUE(q->apqn); + + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + if (test_bit_inv(apid, matrix_mdev->matrix.apm) && + test_bit_inv(apqi, matrix_mdev->matrix.aqm)) + return matrix_mdev; + } + + return NULL; +} + +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t nchars = 0; + struct vfio_ap_queue *q; + struct ap_matrix_mdev *matrix_mdev; + struct ap_device *apdev = to_ap_dev(dev); + + mutex_lock(&matrix_dev->mdevs_lock); + q = dev_get_drvdata(&apdev->device); + matrix_mdev = vfio_ap_mdev_for_queue(q); + + if (matrix_mdev) { + if (matrix_mdev->kvm) + nchars = scnprintf(buf, PAGE_SIZE, "%s\n", + AP_QUEUE_IN_USE); + else + nchars = scnprintf(buf, PAGE_SIZE, "%s\n", + AP_QUEUE_ASSIGNED); + } else { + nchars = scnprintf(buf, PAGE_SIZE, "%s\n", + AP_QUEUE_UNASSIGNED); + } + + mutex_unlock(&matrix_dev->mdevs_lock); + + return nchars; +} + +static DEVICE_ATTR_RO(status); + +static struct attribute *vfio_queue_attrs[] = { + &dev_attr_status.attr, + NULL, +}; + +static const struct attribute_group vfio_queue_attr_group = { + .attrs = vfio_queue_attrs, +}; + static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { .open_device = vfio_ap_mdev_open_device, .close_device = vfio_ap_mdev_close_device, @@ -1455,3 +1837,432 @@ void vfio_ap_mdev_unregister(void) mdev_unregister_device(&matrix_dev->device); mdev_unregister_driver(&vfio_ap_matrix_driver); } + +int vfio_ap_mdev_probe_queue(struct ap_device *apdev) +{ + int ret; + struct vfio_ap_queue *q; + struct ap_matrix_mdev *matrix_mdev; + + ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group); + if (ret) + return ret; + + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) + return -ENOMEM; + + q->apqn = to_ap_queue(&apdev->device)->qid; + q->saved_isc = VFIO_AP_ISC_INVALID; + matrix_mdev = get_update_locks_by_apqn(q->apqn); + + if (matrix_mdev) { + vfio_ap_mdev_link_queue(matrix_mdev, q); + + if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm, + matrix_mdev)) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + } + dev_set_drvdata(&apdev->device, q); + release_update_locks_for_mdev(matrix_mdev); + + return 0; +} + +void vfio_ap_mdev_remove_queue(struct ap_device *apdev) +{ + unsigned long apid, apqi; + struct vfio_ap_queue *q; + struct ap_matrix_mdev *matrix_mdev; + + sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group); + q = dev_get_drvdata(&apdev->device); + get_update_locks_for_queue(q); + matrix_mdev = q->matrix_mdev; + + if (matrix_mdev) { + vfio_ap_unlink_queue_fr_mdev(q); + + apid = AP_QID_CARD(q->apqn); + apqi = AP_QID_QUEUE(q->apqn); + + /* + * If the queue is assigned to the guest's APCB, then remove + * the adapter's APID 
from the APCB and hot unplug it from the guest.
+	 */
+	if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+	    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+	}
+	}
+
+	vfio_ap_mdev_reset_queue(q, 1);
+	dev_set_drvdata(&apdev->device, NULL);
+	kfree(q);
+	release_update_locks_for_mdev(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
+ *				 assigned to a mediated device under the control
+ *				 of the vfio_ap device driver.
+ *
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
+ *
+ * Return:
+ *	* -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
+ *	  assigned to a mediated device under the control of the vfio_ap
+ *	  device driver.
+ * * Otherwise, return 0.
+ */
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+{
+	int ret;
+
+	mutex_lock(&matrix_dev->guests_lock);
+	mutex_lock(&matrix_dev->mdevs_lock);
+	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+	mutex_unlock(&matrix_dev->mdevs_lock);
+	mutex_unlock(&matrix_dev->guests_lock);
+
+	return ret;
+}
+
+/**
+ * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
+ *				 domains that have been removed from the host's
+ *				 AP configuration from a guest.
+ *
+ * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
+ * @aprem: the adapters that have been removed from the host's AP configuration
+ * @aqrem: the domains that have been removed from the host's AP configuration
+ * @cdrem: the control domains that have been removed from the host's AP
+ *	   configuration.
+ */
+static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
+					unsigned long *aprem,
+					unsigned long *aqrem,
+					unsigned long *cdrem)
+{
+	int do_hotplug = 0;
+
+	if (!bitmap_empty(aprem, AP_DEVICES)) {
+		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
+					    matrix_mdev->shadow_apcb.apm,
+					    aprem, AP_DEVICES);
+	}
+
+	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
+		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
+					    matrix_mdev->shadow_apcb.aqm,
+					    aqrem, AP_DEVICES);
+	}
+
+	if (!bitmap_empty(cdrem, AP_DOMAINS))
+		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
+					    matrix_mdev->shadow_apcb.adm,
+					    cdrem, AP_DOMAINS);
+
+	if (do_hotplug)
+		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
+ *			     domains and control domains that have been removed
+ *			     from the host AP configuration and unplugs them
+ *			     from those guests.
+ *
+ * @ap_remove: bitmap specifying which adapters have been removed from the host
+ *	       config.
+ * @aq_remove: bitmap specifying which domains have been removed from the host
+ *	       config.
+ * @cd_remove: bitmap specifying which control domains have been removed from
+ *	       the host config.
+ */ +static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove, + unsigned long *aq_remove, + unsigned long *cd_remove) +{ + struct ap_matrix_mdev *matrix_mdev; + DECLARE_BITMAP(aprem, AP_DEVICES); + DECLARE_BITMAP(aqrem, AP_DOMAINS); + DECLARE_BITMAP(cdrem, AP_DOMAINS); + int do_remove = 0; + + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + mutex_lock(&matrix_mdev->kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); + + do_remove |= bitmap_and(aprem, ap_remove, + matrix_mdev->matrix.apm, + AP_DEVICES); + do_remove |= bitmap_and(aqrem, aq_remove, + matrix_mdev->matrix.aqm, + AP_DOMAINS); + do_remove |= bitmap_andnot(cdrem, cd_remove, + matrix_mdev->matrix.adm, + AP_DOMAINS); + + if (do_remove) + vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem, + cdrem); + + mutex_unlock(&matrix_dev->mdevs_lock); + mutex_unlock(&matrix_mdev->kvm->lock); + } +} + +/** + * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and + * control domains from the host AP configuration + * by unplugging them from the guests that are + * using them. + * @cur_config_info: the current host AP configuration information + * @prev_config_info: the previous host AP configuration information + */ +static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info, + struct ap_config_info *prev_config_info) +{ + int do_remove; + DECLARE_BITMAP(aprem, AP_DEVICES); + DECLARE_BITMAP(aqrem, AP_DOMAINS); + DECLARE_BITMAP(cdrem, AP_DOMAINS); + + do_remove = bitmap_andnot(aprem, + (unsigned long *)prev_config_info->apm, + (unsigned long *)cur_config_info->apm, + AP_DEVICES); + do_remove |= bitmap_andnot(aqrem, + (unsigned long *)prev_config_info->aqm, + (unsigned long *)cur_config_info->aqm, + AP_DEVICES); + do_remove |= bitmap_andnot(cdrem, + (unsigned long *)prev_config_info->adm, + (unsigned long *)cur_config_info->adm, + AP_DEVICES); + + if (do_remove) + vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem); +} + +/** + * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that + * are older than AP type 10 (CEX4). + * @apm: a bitmap of the APIDs to examine + * @aqm: a bitmap of the APQIs of the queues to query for the AP type. + */ +static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm) +{ + bool apid_cleared; + struct ap_queue_status status; + unsigned long apid, apqi, info; + int qtype, qtype_mask = 0xff000000; + + for_each_set_bit_inv(apid, apm, AP_DEVICES) { + apid_cleared = false; + + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info); + switch (status.response_code) { + /* + * According to the architecture in each case + * below, the queue's info should be filled. + */ + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_BUSY: + qtype = info & qtype_mask; + + /* + * The vfio_ap device driver only + * supports CEX4 and newer adapters, so + * remove the APID if the adapter is + * older than a CEX4. + */ + if (qtype < AP_DEVICE_TYPE_CEX4) { + clear_bit_inv(apid, apm); + apid_cleared = true; + } + + break; + + default: + /* + * If we don't know the adapter type, + * clear its APID since it can't be + * determined whether the vfio_ap + * device driver supports it. 
+				 */
+				clear_bit_inv(apid, apm);
+				apid_cleared = true;
+				break;
+			}
+
+			/*
+			 * If we've already cleared the APID from the apm, there
+			 * is no need to continue examining the remaining AP
+			 * queues to determine the type of the adapter.
+			 */
+			if (apid_cleared)
+				continue;
+		}
+	}
+}
+
+/**
+ * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
+ *			  control domains that have been added to the host's
+ *			  AP configuration for each matrix mdev to which they
+ *			  are assigned.
+ *
+ * @apm_add: a bitmap specifying the adapters that have been added to the AP
+ *	     configuration.
+ * @aqm_add: a bitmap specifying the domains that have been added to the AP
+ *	     configuration.
+ * @adm_add: a bitmap specifying the control domains that have been added to the
+ *	     AP configuration.
+ */
+static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
+				 unsigned long *adm_add)
+{
+	struct ap_matrix_mdev *matrix_mdev;
+
+	if (list_empty(&matrix_dev->mdev_list))
+		return;
+
+	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
+
+	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+		bitmap_and(matrix_mdev->apm_add,
+			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
+		bitmap_and(matrix_mdev->aqm_add,
+			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
+		bitmap_and(matrix_mdev->adm_add,
+			   matrix_mdev->matrix.adm, adm_add, AP_DEVICES);
+	}
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
+ *			     control domains to the host AP configuration
+ *			     by updating the bitmaps that specify what adapters,
+ *			     domains and control domains have been added so they
+ *			     can be hot plugged into the guest when the AP bus
+ *			     scan completes (see vfio_ap_on_scan_complete
+ *			     function).
+ * @cur_config_info: the current AP configuration information
+ * @prev_config_info: the previous AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
+				    struct ap_config_info *prev_config_info)
+{
+	bool do_add;
+	DECLARE_BITMAP(apm_add, AP_DEVICES);
+	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+	DECLARE_BITMAP(adm_add, AP_DOMAINS);
+
+	do_add = bitmap_andnot(apm_add,
+			       (unsigned long *)cur_config_info->apm,
+			       (unsigned long *)prev_config_info->apm,
+			       AP_DEVICES);
+	do_add |= bitmap_andnot(aqm_add,
+				(unsigned long *)cur_config_info->aqm,
+				(unsigned long *)prev_config_info->aqm,
+				AP_DOMAINS);
+	do_add |= bitmap_andnot(adm_add,
+				(unsigned long *)cur_config_info->adm,
+				(unsigned long *)prev_config_info->adm,
+				AP_DOMAINS);
+
+	if (do_add)
+		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
+}
+
+/**
+ * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
+ *			    configuration.
+ * + * @cur_cfg_info: the current host AP configuration + * @prev_cfg_info: the previous host AP configuration + */ +void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info, + struct ap_config_info *prev_cfg_info) +{ + if (!cur_cfg_info || !prev_cfg_info) + return; + + mutex_lock(&matrix_dev->guests_lock); + + vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info); + vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info); + memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info)); + + mutex_unlock(&matrix_dev->guests_lock); +} + +static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) +{ + bool do_hotplug = false; + int filter_domains = 0; + int filter_adapters = 0; + DECLARE_BITMAP(apm, AP_DEVICES); + DECLARE_BITMAP(aqm, AP_DOMAINS); + + mutex_lock(&matrix_mdev->kvm->lock); + mutex_lock(&matrix_dev->mdevs_lock); + + filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm, + matrix_mdev->apm_add, AP_DEVICES); + filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm, + matrix_mdev->aqm_add, AP_DOMAINS); + + if (filter_adapters && filter_domains) + do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev); + else if (filter_adapters) + do_hotplug |= + vfio_ap_mdev_filter_matrix(apm, + matrix_mdev->shadow_apcb.aqm, + matrix_mdev); + else + do_hotplug |= + vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm, + aqm, matrix_mdev); + + if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add, + AP_DOMAINS)) + do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); + + if (do_hotplug) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + + mutex_unlock(&matrix_dev->mdevs_lock); + mutex_unlock(&matrix_mdev->kvm->lock); +} + +void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info, + struct ap_config_info *old_config_info) +{ + struct ap_matrix_mdev *matrix_mdev; + + mutex_lock(&matrix_dev->guests_lock); + + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) && + bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) && + bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS)) + continue; + + vfio_ap_mdev_hot_plug_cfg(matrix_mdev); + bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES); + bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS); + bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS); + } + + mutex_unlock(&matrix_dev->guests_lock); +} diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index d912487175d3..d782cf463eab 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/kvm_host.h> #include <linux/vfio.h> +#include <linux/hashtable.h> #include "ap_bus.h" @@ -32,20 +33,26 @@ * @available_instances: number of mediated matrix devices that can be created * @info: the struct containing the output from the PQAP(QCI) instruction * @mdev_list: the list of mediated matrix devices created - * @lock: mutex for locking the AP matrix device. This lock will be + * @mdevs_lock: mutex for locking the AP matrix device. This lock will be * taken every time we fiddle with state managed by the vfio_ap * driver, be it using @mdev_list or writing the state of a * single ap_matrix_mdev device. It's quite coarse but we don't * expect much contention. * @vfio_ap_drv: the vfio_ap device driver + * @guests_lock: mutex for controlling access to a guest that is using AP + * devices passed through by the vfio_ap device driver. 
This lock + * will be taken when the AP devices are plugged into or unplugged + * from a guest, and when an ap_matrix_mdev device is added to or + * removed from @mdev_list or the list is iterated. */ struct ap_matrix_dev { struct device device; atomic_t available_instances; struct ap_config_info info; struct list_head mdev_list; - struct mutex lock; + struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */ struct ap_driver *vfio_ap_drv; + struct mutex guests_lock; /* serializes access to each KVM guest */ }; extern struct ap_matrix_dev *matrix_dev; @@ -75,24 +82,44 @@ struct ap_matrix { }; /** + * struct ap_queue_table - a table of queue objects. + * + * @queues: a hashtable of queues (struct vfio_ap_queue). + */ +struct ap_queue_table { + DECLARE_HASHTABLE(queues, 8); +}; + +/** * struct ap_matrix_mdev - Contains the data associated with a matrix mediated * device. * @vdev: the vfio device * @node: allows the ap_matrix_mdev struct to be added to a list * @matrix: the adapters, usage domains and control domains assigned to the * mediated matrix device. + * @shadow_apcb: the shadow copy of the APCB field of the KVM guest's CRYCB * @kvm: the struct holding guest's state * @pqap_hook: the function pointer to the interception handler for the * PQAP(AQIC) instruction. * @mdev: the mediated device + * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev + * @apm_add: bitmap of APIDs added to the host's AP configuration + * @aqm_add: bitmap of APQIs added to the host's AP configuration + * @adm_add: bitmap of control domain numbers added to the host's AP + * configuration */ struct ap_matrix_mdev { struct vfio_device vdev; struct list_head node; struct ap_matrix matrix; + struct ap_matrix shadow_apcb; struct kvm *kvm; crypto_hook pqap_hook; struct mdev_device *mdev; + struct ap_queue_table qtable; + DECLARE_BITMAP(apm_add, AP_DEVICES); + DECLARE_BITMAP(aqm_add, AP_DOMAINS); + DECLARE_BITMAP(adm_add, AP_DOMAINS); }; /** @@ -102,6 +129,8 @@ struct ap_matrix_mdev { * @saved_iova: the notification indicator byte (nib) address * @apqn: the APQN of the AP queue device * @saved_isc: the guest ISC registered with the GIB interface + * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable + * @reset_rc: the status response code from the last reset of the queue */ struct vfio_ap_queue { struct ap_matrix_mdev *matrix_mdev; @@ -109,11 +138,21 @@ struct vfio_ap_queue { int apqn; #define VFIO_AP_ISC_INVALID 0xff unsigned char saved_isc; + struct hlist_node mdev_qnode; + unsigned int reset_rc; }; int vfio_ap_mdev_register(void); void vfio_ap_mdev_unregister(void); -int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, - unsigned int retry); + +int vfio_ap_mdev_probe_queue(struct ap_device *queue); +void vfio_ap_mdev_remove_queue(struct ap_device *queue); + +int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm); + +void vfio_ap_on_cfg_changed(struct ap_config_info *new_config_info, + struct ap_config_info *old_config_info); +void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info, + struct ap_config_info *old_config_info); #endif /* _VFIO_AP_PRIVATE_H_ */ |
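A note on the two structures the rework introduces above: each vfio_ap_queue now carries an mdev_qnode so it can be linked into the per-mdev ap_queue_table hashtable, and the old single matrix_dev->lock is split so that guests_lock is taken before mdevs_lock wherever both are needed (see vfio_ap_mdev_resource_in_use()), with a guest's kvm->lock nested between them on the hot plug/unplug paths (vfio_ap_mdev_cfg_remove(), vfio_ap_mdev_hot_plug_cfg()). The fragment below is only a minimal sketch of the generic <linux/hashtable.h> pattern the queue table relies on; the demo_* names are invented for illustration, they are not the driver's symbols, and the sketch omits all locking.

#include <linux/hashtable.h>

/* Trimmed-down stand-ins for vfio_ap_queue and ap_queue_table. */
struct demo_queue {
	int apqn;			/* key: AP queue number */
	struct hlist_node qnode;	/* plays the role of mdev_qnode */
};

struct demo_qtable {
	DECLARE_HASHTABLE(queues, 8);	/* 256 buckets, as in ap_queue_table */
};

static void demo_qtable_init(struct demo_qtable *qtable)
{
	hash_init(qtable->queues);
}

/* Link a queue into the table, keyed by its APQN. */
static void demo_link_queue(struct demo_qtable *qtable, struct demo_queue *q)
{
	hash_add(qtable->queues, &q->qnode, q->apqn);
}

/* Look up a queue by APQN; hash_for_each_possible() walks one bucket only. */
static struct demo_queue *demo_find_queue(struct demo_qtable *qtable, int apqn)
{
	struct demo_queue *q;

	hash_for_each_possible(qtable->queues, q, qnode, apqn)
		if (q->apqn == apqn)
			return q;

	return NULL;
}

/* Visit every linked queue, the way the reworked reset path walks the table. */
static int demo_reset_all(struct demo_qtable *qtable,
			  int (*reset_fn)(struct demo_queue *q))
{
	struct demo_queue *q;
	int bkt, ret, rc = 0;

	hash_for_each(qtable->queues, bkt, q, qnode) {
		ret = reset_fn(q);
		if (ret)
			rc = ret;	/* keep going, remember the failure */
	}

	return rc;
}

Iterating the table with hash_for_each(), as vfio_ap_mdev_reset_queues() does in the diff above, means the reset path no longer has to re-walk the APM/AQM bitmaps and call vfio_ap_find_queue() for every possible APQN; the queues were already linked in by vfio_ap_mdev_probe_queue() via vfio_ap_mdev_link_queue().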