Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--   drivers/s390/cio/css.c           |  21
-rw-r--r--   drivers/s390/cio/css.h           |   2
-rw-r--r--   drivers/s390/cio/device.c        |   9
-rw-r--r--   drivers/s390/cio/vfio_ccw_cp.c   | 365
-rw-r--r--   drivers/s390/cio/vfio_ccw_cp.h   |   3
-rw-r--r--   drivers/s390/cio/vfio_ccw_drv.c  |   2
-rw-r--r--   drivers/s390/cio/vfio_ccw_fsm.c  |   2
7 files changed, 225 insertions, 179 deletions
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c7db95398500..dfbb998db86f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -740,12 +740,21 @@ void css_schedule_eval_all(void)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-static int __unset_registered(struct device *dev, void *data)
+static int __unset_validpath(struct device *dev, void *data)
{
struct idset *set = data;
struct subchannel *sch = to_subchannel(dev);
+ struct pmcw *pmcw = &sch->schib.pmcw;
+
+ /*
+ * Keep only those subchannels in the idset that have no operational
+ * device attached. A path is usable only if it is available (PAM)
+ * and operational (POM); OPM additionally excludes any path that is
+ * currently varied offline.
+ */
+ if (sch->st == SUBCHANNEL_TYPE_IO &&
+ (sch->opm & pmcw->pam & pmcw->pom))
+ idset_sch_del(set, sch->schid);
- idset_sch_del(set, sch->schid);
return 0;
}
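As an illustration of the path-validity test added above, here is a minimal standalone sketch (the helper name and the mask values are invented for this example; only the bitwise AND of OPM, PAM and POM comes from the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* One bit per channel path. A path is usable only if it is
	 * available (PAM), operational (POM), and not varied offline (OPM). */
	static bool has_valid_path(uint8_t opm, uint8_t pam, uint8_t pom)
	{
		return (opm & pam & pom) != 0;
	}

	/* Example: pam = 0xC0, pom = 0x80, opm = 0x40
	 * 0xC0 & 0x80 & 0x40 == 0x00 -> no usable path, so the
	 * subchannel stays in the idset and gets evaluated. */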
@@ -774,8 +783,8 @@ void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
}
idset_fill(set);
switch (cond) {
- case CSS_EVAL_UNREG:
- bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+ case CSS_EVAL_NO_PATH:
+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
break;
case CSS_EVAL_NOT_ONLINE:
bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
@@ -798,11 +807,11 @@ void css_wait_for_slow_path(void)
flush_workqueue(cio_work_q);
}
-/* Schedule reprobing of all unregistered subchannels. */
+/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
/* Schedule with a delay to allow merging of subsequent calls. */
- css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
+ css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ede0b905bc6f..ea5550554297 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -38,7 +38,7 @@
* Conditions used to specify which subchannels need evaluation
*/
enum css_eval_cond {
- CSS_EVAL_UNREG, /* unregistered subchannels */
+ CSS_EVAL_NO_PATH, /* Subchannels with no operational paths */
CSS_EVAL_NOT_ONLINE /* sch without an online-device */
};
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9e0cf44ff9d4..5418e60dbfc3 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -244,10 +244,13 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
+ mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
/* Undo device_add(). */
device_del(&cdev->dev);
}
+ mutex_unlock(&cdev->reg_mutex);
+
if (cdev->private->flags.initialized) {
cdev->private->flags.initialized = 0;
/* Release reference from device_initialize(). */
@@ -653,11 +656,13 @@ static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
int ret;
+ mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
device_release_driver(&cdev->dev);
ret = device_attach(&cdev->dev);
WARN_ON(ret == -ENODEV);
}
+ mutex_unlock(&cdev->reg_mutex);
}
static void
@@ -740,6 +745,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
timer_setup(&priv->timer, ccw_device_timeout, 0);
+ mutex_init(&cdev->reg_mutex);
atomic_set(&priv->onoff, 0);
cdev->ccwlock = sch->lock;
@@ -825,6 +831,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
* be registered). We need to reprobe since we may now have sense id
* information.
*/
+ mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
if (!cdev->drv) {
ret = device_reprobe(&cdev->dev);
@@ -847,12 +854,14 @@ static void io_subchannel_register(struct ccw_device *cdev)
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(sch->lock, flags);
+ mutex_unlock(&cdev->reg_mutex);
/* Release initial device reference. */
put_device(&cdev->dev);
goto out_err;
}
out:
cdev->private->flags.recog_done = 1;
+ mutex_unlock(&cdev->reg_mutex);
wake_up(&cdev->private->wait_q);
out_err:
if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
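The races closed by reg_mutex in the device.c hunks above all share the same check-then-act shape; a simplified sketch of the pattern (condensed from those hunks, not new code):

	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		/* Without the mutex, ccw_device_unregister() could call
		 * device_del() between this check and the action below,
		 * leaving us operating on an unregistered device. */
		device_release_driver(&cdev->dev);
	}
	mutex_unlock(&cdev->reg_mutex);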
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index c0a09fa8991a..1c31e81ca8de 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -42,8 +42,7 @@ struct ccwchain {
/*
* page_array_alloc() - alloc memory for page array
* @pa: page_array on which to perform the operation
- * @iova: target guest physical address
- * @len: number of bytes that should be pinned from @iova
+ * @len: number of page entries to allocate (and later pin)
*
* Attempt to allocate memory for page array.
*
@@ -56,31 +55,24 @@ struct ccwchain {
* -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, unsigned int len)
{
- int i;
-
if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (!pa->pa_nr)
+ if (len == 0)
return -EINVAL;
- pa->pa_iova = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova)) {
- pa->pa_nr = 0;
+ pa->pa_nr = len;
+
+ pa->pa_iova = kcalloc(len, sizeof(*pa->pa_iova), GFP_KERNEL);
+ if (!pa->pa_iova)
return -ENOMEM;
- }
- pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova[0] = iova;
- pa->pa_page[0] = NULL;
- for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
- pa->pa_page[i] = NULL;
+ pa->pa_page = kcalloc(len, sizeof(*pa->pa_page), GFP_KERNEL);
+ if (!pa->pa_page) {
+ kfree(pa->pa_iova);
+ return -ENOMEM;
}
return 0;
@@ -91,12 +83,13 @@ static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
* @pa: page_array on which to perform the operation
* @vdev: the vfio device to perform the operation
* @pa_nr: number of user pages to unpin
+ * @unaligned: were pages unaligned on the pin request
*
* Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
* otherwise only clear pa->pa_nr
*/
static void page_array_unpin(struct page_array *pa,
- struct vfio_device *vdev, int pa_nr)
+ struct vfio_device *vdev, int pa_nr, bool unaligned)
{
int unpinned = 0, npage = 1;
@@ -105,7 +98,8 @@ static void page_array_unpin(struct page_array *pa,
dma_addr_t *last = &first[npage];
if (unpinned + npage < pa_nr &&
- *first + npage * PAGE_SIZE == *last) {
+ *first + npage * PAGE_SIZE == *last &&
+ !unaligned) {
npage++;
continue;
}
@@ -121,13 +115,20 @@ static void page_array_unpin(struct page_array *pa,
/*
* page_array_pin() - Pin user pages in memory
* @pa: page_array on which to perform the operation
- * @mdev: the mediated device to perform pin operations
+ * @vdev: the vfio device to perform pin operations
+ * @unaligned: true if the pages are not aligned on a 4K boundary
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
+ *
+ * Requests to pin "aligned" pages can be coalesced into a single
+ * vfio_pin_pages request for the sake of efficiency, based on the
+ * expectation of 4K page requests. Unaligned requests are probably
+ * dealing with 2K "pages", and cannot be coalesced without
+ * reworking this logic to incorporate that math.
*/
-static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
int pinned = 0, npage = 1;
int ret = 0;
@@ -137,7 +138,8 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
dma_addr_t *last = &first[npage];
if (pinned + npage < pa->pa_nr &&
- *first + npage * PAGE_SIZE == *last) {
+ *first + npage * PAGE_SIZE == *last &&
+ !unaligned) {
npage++;
continue;
}
@@ -159,14 +161,15 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
return ret;
err_out:
- page_array_unpin(pa, vdev, pinned);
+ page_array_unpin(pa, vdev, pinned, unaligned);
return ret;
}
/* Unpin the pages before releasing the memory. */
-static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
- page_array_unpin(pa, vdev, pa->pa_nr);
+ page_array_unpin(pa, vdev, pa->pa_nr, unaligned);
+ kfree(pa->pa_page);
kfree(pa->pa_iova);
}
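To illustrate the coalescing behaviour the @unaligned flag controls (guest addresses invented): three 4K-aligned entries at 0x1000, 0x2000 and 0x3000 satisfy *first + npage * PAGE_SIZE == *last on each iteration, so they collapse into a single vfio_pin_pages() call with npage = 3. With @unaligned set (the 2K-IDAW case), the check short-circuits and each entry is pinned individually.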
@@ -199,11 +202,12 @@ static inline void page_array_idal_create_words(struct page_array *pa,
* idaw.
*/
- for (i = 0; i < pa->pa_nr; i++)
+ for (i = 0; i < pa->pa_nr; i++) {
idaws[i] = page_to_phys(pa->pa_page[i]);
- /* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
+ /* Incorporate any offset from each starting address */
+ idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
+ }
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
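A worked example of the per-entry offset math now used in page_array_idal_create_words() above (addresses invented): with 2K IDAWs, pa->pa_iova[1] might be 0x20800; if the pinned page backing it sits at host physical 0x7f000, the resulting IDAW is

	idaws[1] = 0x7f000 + (0x20800 & 0xfff) = 0x7f800

The old code adjusted only idaws[0], which was sufficient when every later entry was known to start on a page boundary, but not once intermediate 2K entries can start mid-page.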
@@ -228,50 +232,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
}
}
-/*
- * Within the domain (@mdev), copy @n bytes from a guest physical
- * address (@iova) to a host physical address (@to).
- */
-static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
- unsigned long n)
-{
- struct page_array pa = {0};
- int i, ret;
- unsigned long l, m;
-
- ret = page_array_alloc(&pa, iova, n);
- if (ret < 0)
- return ret;
-
- ret = page_array_pin(&pa, vdev);
- if (ret < 0) {
- page_array_unpin_free(&pa, vdev);
- return ret;
- }
-
- l = n;
- for (i = 0; i < pa.pa_nr; i++) {
- void *from = kmap_local_page(pa.pa_page[i]);
-
- m = PAGE_SIZE;
- if (i == 0) {
- from += iova & (PAGE_SIZE - 1);
- m -= iova & (PAGE_SIZE - 1);
- }
-
- m = min(l, m);
- memcpy(to + (n - l), from, m);
- kunmap_local(from);
-
- l -= m;
- if (l == 0)
- break;
- }
-
- page_array_unpin_free(&pa, vdev);
-
- return l;
-}
+#define idal_is_2k(_cp) (!(_cp)->orb.cmd.c64 || (_cp)->orb.cmd.i2k)
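For reference, the ORB bit combinations this macro distinguishes (summarized from the kernel-doc of ccw_count_idaws() and the cp_get_orb() comment later in this patch):

	c64  i2k  |  IDAW flavor
	----------+----------------------------
	 0    -   |  Format-1 IDAWs, 2K each
	 1    0   |  Format-2 IDAWs, 4K each
	 1    1   |  Format-2 IDAWs, 2K each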
/*
* Helpers to operate ccwchain.
@@ -356,40 +317,41 @@ static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
struct ccwchain *chain;
- void *data;
- size_t size;
-
- /* Make ccw address aligned to 8. */
- size = ((sizeof(*chain) + 7L) & -8L) +
- sizeof(*chain->ch_ccw) * len +
- sizeof(*chain->ch_pa) * len;
- chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
- data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
- chain->ch_ccw = (struct ccw1 *)data;
+ chain->ch_ccw = kcalloc(len, sizeof(*chain->ch_ccw), GFP_DMA | GFP_KERNEL);
+ if (!chain->ch_ccw)
+ goto out_err;
- data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct page_array *)data;
-
- chain->ch_len = len;
+ chain->ch_pa = kcalloc(len, sizeof(*chain->ch_pa), GFP_KERNEL);
+ if (!chain->ch_pa)
+ goto out_err;
list_add_tail(&chain->next, &cp->ccwchain_list);
return chain;
+
+out_err:
+ kfree(chain->ch_ccw);
+ kfree(chain);
+ return NULL;
}
static void ccwchain_free(struct ccwchain *chain)
{
list_del(&chain->next);
+ kfree(chain->ch_pa);
+ kfree(chain->ch_ccw);
kfree(chain);
}
/* Free resource for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
- struct ccw1 *ccw = chain->ch_ccw + idx;
+ struct ccw1 *ccw = &chain->ch_ccw[idx];
if (ccw_is_tic(ccw))
return;
@@ -419,14 +381,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
cnt++;
/*
- * As we don't want to fail direct addressing even if the
- * orb specified one of the unsupported formats, we defer
- * checking for IDAWs in unsupported formats to here.
- */
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
- return -EOPNOTSUPP;
-
- /*
* We want to keep counting if the current CCW has the
* command-chaining flag enabled, or if it is a TIC CCW
* that loops back into the current chain. The latter
@@ -471,10 +425,9 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
int len, ret;
/* Copy 2K (the most we support today) of possible CCWs */
- len = copy_from_iova(vdev, cp->guest_cp, cda,
- CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
- if (len)
- return len;
+ ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
+ if (ret)
+ return ret;
/* Convert any Format-0 CCWs to Format-1 */
if (!cp->orb.cmd.fmt)
@@ -489,6 +442,8 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
+
+ chain->ch_len = len;
chain->ch_iova = cda;
/* Copy the actual CCWs into the new chain */
@@ -510,7 +465,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
int i, ret;
for (i = 0; i < chain->ch_len; i++) {
- tic = chain->ch_ccw + i;
+ tic = &chain->ch_ccw[i];
if (!ccw_is_tic(tic))
continue;
@@ -528,11 +483,9 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
return 0;
}
-static int ccwchain_fetch_tic(struct ccwchain *chain,
- int idx,
+static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
- struct ccw1 *ccw = chain->ch_ccw + idx;
struct ccwchain *iter;
u32 ccw_head;
@@ -548,43 +501,124 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
return -EFAULT;
}
-static int ccwchain_fetch_direct(struct ccwchain *chain,
- int idx,
- struct channel_program *cp)
+static unsigned long *get_guest_idal(struct ccw1 *ccw,
+ struct channel_program *cp,
+ int idaw_nr)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- struct ccw1 *ccw;
- struct page_array *pa;
- u64 iova;
unsigned long *idaws;
+ unsigned int *idaws_f1;
+ int idal_len = idaw_nr * sizeof(*idaws);
+ int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
+ int idaw_mask = ~(idaw_size - 1);
+ int i, ret;
+
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+ if (!idaws)
+ return ERR_PTR(-ENOMEM);
+
+ if (ccw_is_idal(ccw)) {
+ /* Copy IDAL from guest */
+ ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
+ if (ret) {
+ kfree(idaws);
+ return ERR_PTR(ret);
+ }
+ } else {
+ /* Fabricate an IDAL based off CCW data address */
+ if (cp->orb.cmd.c64) {
+ idaws[0] = ccw->cda;
+ for (i = 1; i < idaw_nr; i++)
+ idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
+ } else {
+ idaws_f1 = (unsigned int *)idaws;
+ idaws_f1[0] = ccw->cda;
+ for (i = 1; i < idaw_nr; i++)
+ idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
+ }
+ }
+
+ return idaws;
+}
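A worked fabrication example (values invented): with 2K IDAWs (idaw_size = 0x800, idaw_mask = ~0x7ff) and ccw->cda = 0x5100, the loop above produces

	idaws[] = { 0x5100, 0x5800, 0x6000, ... }

Only the first entry keeps the byte offset into its block; each subsequent entry is the previous one stepped by idaw_size and rounded down to the block boundary, matching how the channel subsystem would walk a real IDAL.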
+
+/*
+ * ccw_count_idaws() - Calculate the number of IDAWs needed to transfer
+ * a specified amount of data
+ *
+ * @ccw: The Channel Command Word being translated
+ * @cp: Channel Program being processed
+ *
+ * The ORB is examined first, since it specifies what format of IDAW
+ * could be used by any CCW in the channel program, regardless of
+ * whether a given CCW actually uses an IDAL. An ORB that does not
+ * specify Format-2-IDAW Control could still contain a CCW with an
+ * IDAL, which would be Format-1 and thus only move 2K with each
+ * IDAW. So all CCWs within the channel program must follow the
+ * same size requirements.
+ */
+static int ccw_count_idaws(struct ccw1 *ccw,
+ struct channel_program *cp)
+{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
+ u64 iova;
+ int size = cp->orb.cmd.c64 ? sizeof(u64) : sizeof(u32);
int ret;
int bytes = 1;
- int idaw_nr, idal_len;
- int i;
-
- ccw = chain->ch_ccw + idx;
if (ccw->count)
bytes = ccw->count;
- /* Calculate size of IDAL */
if (ccw_is_idal(ccw)) {
- /* Read first IDAW to see if it's 4K-aligned or not. */
- /* All subsequent IDAws will be 4K-aligned. */
- ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova));
+ /* Read first IDAW to check its starting address. */
+ /* All subsequent IDAWs will be 2K- or 4K-aligned. */
+ ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
if (ret)
return ret;
+
+ /*
+ * Format-1 IDAWs only occupy the first 32 bits,
+ * and bit 0 is always off.
+ */
+ if (!cp->orb.cmd.c64)
+ iova = iova >> 32;
} else {
iova = ccw->cda;
}
- idaw_nr = idal_nr_words((void *)iova, bytes);
- idal_len = idaw_nr * sizeof(*idaws);
+
+ /* Format-1 IDAWs operate on 2K each */
+ if (!cp->orb.cmd.c64)
+ return idal_2k_nr_words((void *)iova, bytes);
+
+ /* Using the 2K variant of Format-2 IDAWs? */
+ if (cp->orb.cmd.i2k)
+ return idal_2k_nr_words((void *)iova, bytes);
+
+ /* The 'usual' case is 4K Format-2 IDAWs */
+ return idal_nr_words((void *)iova, bytes);
+}
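A quick worked example of the counting logic (values invented, macros from asm/idals.h): for iova = 0x10400 and bytes = 0x2200, 4K Format-2 IDAWs need idal_nr_words() = ((0x400 + 0x2200) + 0xfff) >> 12 = 3 entries, while the 2K variants (Format-1, or Format-2 with i2k) need idal_2k_nr_words() = ((0x400 + 0x2200) + 0x7ff) >> 11 = 5.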
+
+static int ccwchain_fetch_ccw(struct ccw1 *ccw,
+ struct page_array *pa,
+ struct channel_program *cp)
+{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
+ unsigned long *idaws;
+ unsigned int *idaws_f1;
+ int ret;
+ int idaw_nr;
+ int i;
+
+ /* Calculate size of IDAL */
+ idaw_nr = ccw_count_idaws(ccw, cp);
+ if (idaw_nr < 0)
+ return idaw_nr;
/* Allocate an IDAL from host storage */
- idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
- if (!idaws) {
- ret = -ENOMEM;
+ idaws = get_guest_idal(ccw, cp, idaw_nr);
+ if (IS_ERR(idaws)) {
+ ret = PTR_ERR(idaws);
goto out_init;
}
@@ -594,33 +628,24 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
* required for the data transfer, since we only support
* 4K IDAWs today.
*/
- pa = chain->ch_pa + idx;
- ret = page_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, idaw_nr);
if (ret < 0)
goto out_free_idaws;
- if (ccw_is_idal(ccw)) {
- /* Copy guest IDAL into host IDAL */
- ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len);
- if (ret)
- goto out_unpin;
-
- /*
- * Copy guest IDAWs into page_array, in case the memory they
- * occupy is not contiguous.
- */
- for (i = 0; i < idaw_nr; i++)
+ /*
+ * Copy guest IDAWs into page_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ idaws_f1 = (unsigned int *)idaws;
+ for (i = 0; i < idaw_nr; i++) {
+ if (cp->orb.cmd.c64)
pa->pa_iova[i] = idaws[i];
- } else {
- /*
- * No action is required here; the iova addresses in page_array
- * were initialized sequentially in page_array_alloc() beginning
- * with the contents of ccw->cda.
- */
+ else
+ pa->pa_iova[i] = idaws_f1[i];
}
if (ccw_does_data_transfer(ccw)) {
- ret = page_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev, idal_is_2k(cp));
if (ret < 0)
goto out_unpin;
} else {
@@ -636,7 +661,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
return 0;
out_unpin:
- page_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev, idal_is_2k(cp));
out_free_idaws:
kfree(idaws);
out_init:
@@ -650,22 +675,20 @@ out_init:
* and to get rid of the cda 2G limitation of ccw1, we'll translate
* direct ccws to idal ccws.
*/
-static int ccwchain_fetch_one(struct ccwchain *chain,
- int idx,
+static int ccwchain_fetch_one(struct ccw1 *ccw,
+ struct page_array *pa,
struct channel_program *cp)
-{
- struct ccw1 *ccw = chain->ch_ccw + idx;
+{
if (ccw_is_tic(ccw))
- return ccwchain_fetch_tic(chain, idx, cp);
+ return ccwchain_fetch_tic(ccw, cp);
- return ccwchain_fetch_direct(chain, idx, cp);
+ return ccwchain_fetch_ccw(ccw, pa, cp);
}
/**
* cp_init() - allocate ccwchains for a channel program.
* @cp: channel_program on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
* @orb: control block for the channel program from the guest
*
* This creates one or more ccwchain(s), and copies the raw data of
@@ -708,15 +731,9 @@ int cp_init(struct channel_program *cp, union orb *orb)
/* Build a ccwchain for the first CCW segment */
ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
- if (!ret) {
+ if (!ret)
cp->initialized = true;
- /* It is safe to force: if it was not set but idals used
- * ccwchain_calc_length would have returned an error.
- */
- cp->orb.cmd.c64 = 1;
- }
-
return ret;
}
@@ -742,7 +759,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- page_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(&chain->ch_pa[i], vdev, idal_is_2k(cp));
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -789,6 +806,8 @@ void cp_free(struct channel_program *cp)
int cp_prefetch(struct channel_program *cp)
{
struct ccwchain *chain;
+ struct ccw1 *ccw;
+ struct page_array *pa;
int len, idx, ret;
/* this is an error in the caller */
@@ -798,7 +817,10 @@ int cp_prefetch(struct channel_program *cp)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len;
for (idx = 0; idx < len; idx++) {
- ret = ccwchain_fetch_one(chain, idx, cp);
+ ccw = &chain->ch_ccw[idx];
+ pa = &chain->ch_pa[idx];
+
+ ret = ccwchain_fetch_one(ccw, pa, cp);
if (ret)
goto out_err;
}
@@ -817,14 +839,13 @@ out_err:
/**
* cp_get_orb() - get the orb of the channel program
* @cp: channel_program on which to perform the operation
- * @intparm: new intparm for the returned orb
- * @lpm: candidate value of the logical-path mask for the returned orb
+ * @sch: subchannel the operation will be performed against
*
* This function returns the address of the updated orb of the channel
* program. Channel I/O device drivers could use this orb to issue a
* ssch.
*/
-union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
+union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
{
union orb *orb;
struct ccwchain *chain;
@@ -836,12 +857,20 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
orb = &cp->orb;
- orb->cmd.intparm = intparm;
+ orb->cmd.intparm = (u32)virt_to_phys(sch);
orb->cmd.fmt = 1;
- orb->cmd.key = PAGE_DEFAULT_KEY >> 4;
+
+ /*
+ * Everything built by vfio-ccw is a Format-2 IDAL.
+ * If the input was a Format-1 IDAL, indicate that
+ * 2K Format-2 IDAWs were created here.
+ */
+ if (!orb->cmd.c64)
+ orb->cmd.i2k = 1;
+ orb->cmd.c64 = 1;
if (orb->cmd.lpm == 0)
- orb->cmd.lpm = lpm;
+ orb->cmd.lpm = sch->lpm;
chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
cpa = chain->ch_ccw;
@@ -919,7 +948,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
+ if (page_array_iova_pinned(&chain->ch_pa[i], iova, length))
return true;
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 54d26e242533..fc31eb699807 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -27,7 +27,6 @@
* struct channel_program - manage information for channel program
* @ccwchain_list: list head of ccwchains
* @orb: orb for the currently processed ssch request
- * @mdev: the mediated device to perform page pinning/unpinning
* @initialized: whether this instance is actually initialized
*
* @ccwchain_list is the head of a ccwchain list, that contains the
@@ -44,7 +43,7 @@ struct channel_program {
int cp_init(struct channel_program *cp, union orb *orb);
void cp_free(struct channel_program *cp);
int cp_prefetch(struct channel_program *cp);
-union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch);
void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 54aba7cceb33..ff538a086fc7 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -225,7 +225,7 @@ static void vfio_ccw_sch_shutdown(struct subchannel *sch)
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
- if (WARN_ON(!private))
+ if (!private)
return;
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 2784a4e4d2be..757b73141246 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -27,7 +27,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
spin_lock_irqsave(sch->lock, flags);
- orb = cp_get_orb(&private->cp, (u32)virt_to_phys(sch), sch->lpm);
+ orb = cp_get_orb(&private->cp, sch);
if (!orb) {
ret = -EIO;
goto out;