Diffstat (limited to 'drivers/xen/gntdev.c')
-rw-r--r--	drivers/xen/gntdev.c	264
1 files changed, 184 insertions, 80 deletions
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bd56653b9bbc..57390c7666e5 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -6,6 +6,7 @@
*
* Copyright (c) 2006-2007, D G Murray.
* (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,10 +27,6 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/mmu_notifier.h>
-#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -37,6 +34,9 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+#include <linux/of_device.h>
+#endif
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -47,6 +47,11 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include "gntdev-common.h"
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+#include "gntdev-dmabuf.h"
+#endif
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
@@ -62,51 +67,23 @@ static atomic_t pages_mapped = ATOMIC_INIT(0);
static int use_ptemod;
#define populate_freeable_maps use_ptemod
-struct gntdev_priv {
- /* maps with visible offsets in the file descriptor */
- struct list_head maps;
- /* maps that are not visible; will be freed on munmap.
- * Only populated if populate_freeable_maps == 1 */
- struct list_head freeable_maps;
- /* lock protects maps and freeable_maps */
- struct mutex lock;
- struct mm_struct *mm;
- struct mmu_notifier mn;
-};
-
-struct unmap_notify {
- int flags;
- /* Address relative to the start of the grant_map */
- int addr;
- int event;
-};
-
-struct grant_map {
- struct list_head next;
- struct vm_area_struct *vma;
- int index;
- int count;
- int flags;
- refcount_t users;
- struct unmap_notify notify;
- struct ioctl_gntdev_grant_ref *grants;
- struct gnttab_map_grant_ref *map_ops;
- struct gnttab_unmap_grant_ref *unmap_ops;
- struct gnttab_map_grant_ref *kmap_ops;
- struct gnttab_unmap_grant_ref *kunmap_ops;
- struct page **pages;
- unsigned long pages_vm_start;
-};
+static int unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+static struct miscdevice gntdev_miscdev;
/* ------------------------------------------------------------------ */
+bool gntdev_account_mapped_pages(int count)
+{
+ return atomic_add_return(count, &pages_mapped) > limit;
+}
+
static void gntdev_print_maps(struct gntdev_priv *priv,
char *text, int text_index)
{
#ifdef DEBUG
- struct grant_map *map;
+ struct gntdev_grant_map *map;
pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
@@ -116,13 +93,32 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#endif
}
-static void gntdev_free_map(struct grant_map *map)
+static void gntdev_free_map(struct gntdev_grant_map *map)
{
if (map == NULL)
return;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ if (map->dma_vaddr) {
+ struct gnttab_dma_alloc_args args;
+
+ args.dev = map->dma_dev;
+ args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
+ args.nr_pages = map->count;
+ args.pages = map->pages;
+ args.frames = map->frames;
+ args.vaddr = map->dma_vaddr;
+ args.dev_bus_addr = map->dma_bus_addr;
+
+ gnttab_dma_free_pages(&args);
+ } else
+#endif
if (map->pages)
gnttab_free_pages(map->count, map->pages);
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ kfree(map->frames);
+#endif
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -132,12 +128,13 @@ static void gntdev_free_map(struct grant_map *map)
kfree(map);
}
-static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
+ int dma_flags)
{
- struct grant_map *add;
+ struct gntdev_grant_map *add;
int i;
- add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
+ add = kzalloc(sizeof(*add), GFP_KERNEL);
if (NULL == add)
return NULL;
@@ -155,6 +152,37 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
NULL == add->pages)
goto err;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ add->dma_flags = dma_flags;
+
+ /*
+ * Check if this mapping is requested to be backed
+ * by a DMA buffer.
+ */
+ if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
+ struct gnttab_dma_alloc_args args;
+
+ add->frames = kcalloc(count, sizeof(add->frames[0]),
+ GFP_KERNEL);
+ if (!add->frames)
+ goto err;
+
+ /* Remember the device, so we can free DMA memory. */
+ add->dma_dev = priv->dma_dev;
+
+ args.dev = priv->dma_dev;
+ args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
+ args.nr_pages = count;
+ args.pages = add->pages;
+ args.frames = add->frames;
+
+ if (gnttab_dma_alloc_pages(&args))
+ goto err;
+
+ add->dma_vaddr = args.vaddr;
+ add->dma_bus_addr = args.dev_bus_addr;
+ } else
+#endif
if (gnttab_alloc_pages(count, add->pages))
goto err;
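
/*
 * Sketch only (not part of the patch): the DMA-backed allocation pattern
 * gntdev_alloc_map() uses above. struct gnttab_dma_alloc_args and the
 * gnttab_dma_{alloc,free}_pages() helpers exist only under
 * CONFIG_XEN_GRANT_DMA_ALLOC; field names are taken from the hunks above,
 * while the caller and its pre-allocated pages/frames arrays are assumptions.
 */
#include <linux/dma-mapping.h>
#include <xen/grant_table.h>

static int example_dma_alloc(struct device *dev, int nr_pages, bool coherent,
			     struct page **pages, xen_pfn_t *frames,
			     void **vaddr, dma_addr_t *dev_bus_addr)
{
	struct gnttab_dma_alloc_args args = {
		.dev = dev,
		.coherent = coherent,	/* as with GNTDEV_DMA_FLAG_COHERENT vs _WC above */
		.nr_pages = nr_pages,
		.pages = pages,		/* caller-allocated array of nr_pages entries */
		.frames = frames,	/* caller-allocated array of nr_pages entries */
	};
	int ret;

	ret = gnttab_dma_alloc_pages(&args);
	if (ret)
		return ret;

	/* Keep these: gnttab_dma_free_pages() needs them to undo the allocation. */
	*vaddr = args.vaddr;
	*dev_bus_addr = args.dev_bus_addr;
	return 0;
}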
@@ -176,9 +204,9 @@ err:
return NULL;
}
-static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
+void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
- struct grant_map *map;
+ struct gntdev_grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
if (add->index + add->count < map->index) {
@@ -193,10 +221,10 @@ done:
gntdev_print_maps(priv, "[new]", add->index);
}
-static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
- int index, int count)
+static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
+ int index, int count)
{
- struct grant_map *map;
+ struct gntdev_grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
@@ -208,7 +236,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
return NULL;
}
-static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
+void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
if (!map)
return;
@@ -239,7 +267,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
static int find_grant_ptes(pte_t *pte, pgtable_t token,
unsigned long addr, void *data)
{
- struct grant_map *map = data;
+ struct gntdev_grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
@@ -272,7 +300,7 @@ static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
}
#endif
-static int map_grant_pages(struct grant_map *map)
+int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
int i, err = 0;
@@ -325,11 +353,20 @@ static int map_grant_pages(struct grant_map *map)
map->unmap_ops[i].handle = map->map_ops[i].handle;
if (use_ptemod)
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ else if (map->dma_vaddr) {
+ unsigned long bfn;
+
+ bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
+ map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
+ }
+#endif
}
return err;
}
-static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
int i, err = 0;
struct gntab_unmap_queue_data unmap_data;
@@ -364,7 +401,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
int range, err = 0;
@@ -396,7 +434,7 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
static void gntdev_vma_open(struct vm_area_struct *vma)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
pr_debug("gntdev_vma_open %p\n", vma);
refcount_inc(&map->users);
@@ -404,7 +442,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
static void gntdev_vma_close(struct vm_area_struct *vma)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
struct file *file = vma->vm_file;
struct gntdev_priv *priv = file->private_data;
@@ -428,7 +466,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
unsigned long addr)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}
@@ -441,18 +479,25 @@ static const struct vm_operations_struct gntdev_vmops = {
/* ------------------------------------------------------------------ */
-static void unmap_if_in_range(struct grant_map *map,
+static bool in_range(struct gntdev_grant_map *map,
unsigned long start, unsigned long end)
{
- unsigned long mstart, mend;
- int err;
-
if (!map->vma)
- return;
+ return false;
if (map->vma->vm_start >= end)
- return;
+ return false;
if (map->vma->vm_end <= start)
- return;
+ return false;
+
+ return true;
+}
+
+static void unmap_if_in_range(struct gntdev_grant_map *map,
+ unsigned long start, unsigned long end)
+{
+ unsigned long mstart, mend;
+ int err;
+
mstart = max(start, map->vma->vm_start);
mend = min(end, map->vma->vm_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -465,28 +510,47 @@ static void unmap_if_in_range(struct grant_map *map,
WARN_ON(err);
}
-static void mn_invl_range_start(struct mmu_notifier *mn,
+static int mn_invl_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ bool blockable)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
+ int ret = 0;
+
+ /* TODO do we really need a mutex here? */
+ if (blockable)
+ mutex_lock(&priv->lock);
+ else if (!mutex_trylock(&priv->lock))
+ return -EAGAIN;
- mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
+ if (in_range(map, start, end)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
unmap_if_in_range(map, start, end);
}
list_for_each_entry(map, &priv->freeable_maps, next) {
+ if (in_range(map, start, end)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
unmap_if_in_range(map, start, end);
}
+
+out_unlock:
mutex_unlock(&priv->lock);
+
+ return ret;
}
static void mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err;
mutex_lock(&priv->lock);
@@ -531,6 +595,15 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->freeable_maps);
mutex_init(&priv->lock);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ priv->dmabuf_priv = gntdev_dmabuf_init();
+ if (IS_ERR(priv->dmabuf_priv)) {
+ ret = PTR_ERR(priv->dmabuf_priv);
+ kfree(priv);
+ return ret;
+ }
+#endif
+
if (use_ptemod) {
priv->mm = get_task_mm(current);
if (!priv->mm) {
@@ -548,6 +621,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
}
flip->private_data = priv;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ priv->dma_dev = gntdev_miscdev.this_device;
+
+ /*
+ * The device is not spawn from a device tree, so arch_setup_dma_ops
+ * is not called, thus leaving the device with dummy DMA ops.
+ * Fix this by calling of_dma_configure() with a NULL node to set
+ * default DMA ops.
+ */
+ of_dma_configure(priv->dma_dev, NULL, true);
+#endif
pr_debug("priv %p\n", priv);
return 0;
@@ -556,21 +640,27 @@ static int gntdev_open(struct inode *inode, struct file *flip)
static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
pr_debug("priv %p\n", priv);
mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
- map = list_entry(priv->maps.next, struct grant_map, next);
+ map = list_entry(priv->maps.next,
+ struct gntdev_grant_map, next);
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
WARN_ON(!list_empty(&priv->freeable_maps));
mutex_unlock(&priv->lock);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ gntdev_dmabuf_fini(priv->dmabuf_priv);
+#endif
+
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
+
kfree(priv);
return 0;
}
@@ -579,7 +669,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
struct ioctl_gntdev_map_grant_ref __user *u)
{
struct ioctl_gntdev_map_grant_ref op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -589,11 +679,11 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
return -EINVAL;
err = -ENOMEM;
- map = gntdev_alloc_map(priv, op.count);
+ map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
if (!map)
return err;
- if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ if (unlikely(gntdev_account_mapped_pages(op.count))) {
pr_debug("can't map: over limit\n");
gntdev_put_map(NULL, map);
return err;
@@ -620,7 +710,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
struct ioctl_gntdev_unmap_grant_ref __user *u)
{
struct ioctl_gntdev_unmap_grant_ref op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err = -ENOENT;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -646,7 +736,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
{
struct ioctl_gntdev_get_offset_for_vaddr op;
struct vm_area_struct *vma;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int rv = -EINVAL;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -677,7 +767,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
struct ioctl_gntdev_unmap_notify op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int rc;
int out_flags;
unsigned int out_event;
@@ -962,6 +1052,20 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GRANT_COPY:
return gntdev_ioctl_grant_copy(priv, ptr);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
+ return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
+ return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
+ return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
+ return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
+#endif
+
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
return -ENOIOCTLCMD;
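
/*
 * Userspace sketch of driving the export ioctl added above. The handlers
 * live in gntdev-dmabuf.c and the request struct comes from the companion
 * include/uapi/xen/gntdev.h change, neither of which is shown in this diff,
 * so the field names and flag used here are assumptions.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>

/* Export @count grant refs from @domid as a dma-buf; returns the fd or -1. */
static int export_refs_as_dmabuf(int gnt_fd, uint32_t domid,
				 const uint32_t *refs, uint32_t count)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
	size_t sz = sizeof(*op) + (count - 1) * sizeof(op->refs[0]);
	int fd = -1;

	op = calloc(1, sz);
	if (!op)
		return -1;

	op->flags = GNTDEV_DMA_FLAG_WC;	/* request write-combine backing */
	op->count = count;
	op->domid = domid;
	memcpy(op->refs, refs, count * sizeof(op->refs[0]));

	if (ioctl(gnt_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
		fd = op->fd;	/* out parameter: the dma-buf file descriptor */

	free(op);
	return fd;
}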
@@ -975,7 +1079,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
struct gntdev_priv *priv = flip->private_data;
int index = vma->vm_pgoff;
int count = vma_pages(vma);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
@@ -1032,7 +1136,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
}
}
- err = map_grant_pages(map);
+ err = gntdev_map_grant_pages(map);
if (err)
goto out_put_map;