Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
 -rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 311
 1 file changed, 176 insertions(+), 135 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 7ed08fdc4c42..8495a1a4b617 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -19,30 +19,22 @@
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
+#include <linux/pfn_t.h>
#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
-/* remove these once drm core helpers are merged */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
- bool dirty, bool accessed);
-int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
-
/*
* GEM buffer object implementation.
*/
-#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
-
/* note: we use upper 8 bits of flags for driver-internal flags: */
-#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
+#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
-
struct omap_gem_object {
struct drm_gem_object base;
@@ -119,8 +111,7 @@ struct omap_gem_object {
} *sync;
};
-static int get_pages(struct drm_gem_object *obj, struct page ***pages);
-static uint64_t mmap_offset(struct drm_gem_object *obj);
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
* not necessarily pinned in TILER all the time, and (b) when they are
@@ -134,27 +125,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj);
* for later..
*/
#define NUM_USERGART_ENTRIES 2
-struct usergart_entry {
+struct omap_drm_usergart_entry {
struct tiler_block *block; /* the reserved tiler block */
dma_addr_t paddr;
struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently
mapped in */
};
-static struct {
- struct usergart_entry entry[NUM_USERGART_ENTRIES];
+
+struct omap_drm_usergart {
+ struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
int height; /* height in rows */
int height_shift; /* ilog2(height in rows) */
int slot_shift; /* ilog2(width per slot) */
int stride_pfn; /* stride in pages */
int last; /* index of last used entry */
-} *usergart;
+};
+
+/* -----------------------------------------------------------------------------
+ * Helpers
+ */
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+ size_t size;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* Make it mmapable */
+ size = omap_gem_mmap_size(obj);
+ ret = drm_gem_create_mmap_offset_size(obj, size);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
+ }
+
+ return drm_vma_node_offset_addr(&obj->vma_node);
+}
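
(Side note, not part of this patch: the offset returned by mmap_offset() is the fake offset userspace passes to mmap(2) on the DRM device fd. A minimal sketch of the usual dumb-buffer flow that exercises it, using the generic DRM_IOCTL_MODE_CREATE_DUMB / DRM_IOCTL_MODE_MAP_DUMB UAPI; error handling omitted.)

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Illustrative only: map a freshly created dumb buffer into userspace. */
static void *map_dumb_buffer(int drm_fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	/* ask the driver for the mmap offset of this GEM handle;
	 * for omapdrm this lands in mmap_offset() above */
	map.handle = create.handle;
	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	/* map the buffer through the DRM device node at that offset */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}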
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+ return obj->filp != NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Eviction
+ */
static void evict_entry(struct drm_gem_object *obj,
- enum tiler_fmt fmt, struct usergart_entry *entry)
+ enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int n = usergart[fmt].height;
+ struct omap_drm_private *priv = obj->dev->dev_private;
+ int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
@@ -180,46 +213,25 @@ static void evict_entry(struct drm_gem_object *obj,
static void evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct omap_drm_private *priv = obj->dev->dev_private;
if (omap_obj->flags & OMAP_BO_TILED) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
- if (!usergart)
- return;
-
for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
- struct usergart_entry *entry = &usergart[fmt].entry[i];
+ struct omap_drm_usergart_entry *entry =
+ &priv->usergart[fmt].entry[i];
+
if (entry->obj == obj)
evict_entry(obj, fmt, entry);
}
}
}
-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
- */
-static inline bool is_shmem(struct drm_gem_object *obj)
-{
- return obj->filp != NULL;
-}
-
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
+/* -----------------------------------------------------------------------------
+ * Page Management
*/
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- return is_shmem(obj) &&
- ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
-}
-
-static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
@@ -272,6 +284,28 @@ free_pages:
return ret;
}
+/* acquire pages when needed (for example, for DMA where a physically
+ * contiguous buffer is not required)
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ if (is_shmem(obj) && !omap_obj->pages) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ dev_err(obj->dev->dev, "could not attach pages\n");
+ return ret;
+ }
+ }
+
+ /* TODO: even phys-contig.. we should have a list of pages? */
+ *pages = omap_obj->pages;
+
+ return 0;
+}
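
(Side note, not part of this patch: the rest of the driver pins backing pages through the exported omap_gem_get_pages()/omap_gem_put_pages() pair, which wrap the get_pages() helper above. A rough usage sketch, assuming the omap_gem_get_pages(obj, &pages, remap) signature this file exports at this point:)

/* Illustrative only: pin the pages, touch them, then drop the pin. */
static int touch_buffer_pages(struct drm_gem_object *obj)
{
	struct page **pages;
	int ret;

	/* remap=true: attach shmem pages if they are not attached yet */
	ret = omap_gem_get_pages(obj, &pages, true);
	if (ret)
		return ret;

	/* ... access pages[0..npages-1] here ... */

	omap_gem_put_pages(obj);
	return 0;
}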
+
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
@@ -301,26 +335,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
return to_omap_bo(obj)->flags;
}
-/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- int ret;
- size_t size;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- /* Make it mmapable */
- size = omap_gem_mmap_size(obj);
- ret = drm_gem_create_mmap_offset_size(obj, size);
- if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
- return 0;
- }
-
- return drm_vma_node_offset_addr(&obj->vma_node);
-}
-
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
@@ -362,6 +376,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
return -EINVAL;
}
+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
+
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -385,7 +403,8 @@ static int fault_1d(struct drm_gem_object *obj,
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+ return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+ __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -393,7 +412,8 @@ static int fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- struct usergart_entry *entry;
+ struct omap_drm_private *priv = obj->dev->dev_private;
+ struct omap_drm_usergart_entry *entry;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
@@ -406,8 +426,8 @@ static int fault_2d(struct drm_gem_object *obj,
* that need to be mapped in to fill 4kb wide CPU page. If the slot
* height is 64, then 64 pages fill a 4kb wide by 64 row region.
*/
- const int n = usergart[fmt].height;
- const int n_shift = usergart[fmt].height_shift;
+ const int n = priv->usergart[fmt].height;
+ const int n_shift = priv->usergart[fmt].height_shift;
/*
* If buffer width in bytes > PAGE_SIZE then the virtual stride is
@@ -428,11 +448,11 @@ static int fault_2d(struct drm_gem_object *obj,
base_pgoff = round_down(pgoff, m << n_shift);
/* figure out buffer width in slots */
- slots = omap_obj->width >> usergart[fmt].slot_shift;
+ slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
- entry = &usergart[fmt].entry[usergart[fmt].last];
+ entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
@@ -478,13 +498,15 @@ static int fault_2d(struct drm_gem_object *obj,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
- pfn += usergart[fmt].stride_pfn;
+ vm_insert_mixed(vma, (unsigned long)vaddr,
+ __pfn_to_pfn_t(pfn, PFN_DEV));
+ pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
/* simple round-robin: */
- usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+ priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
+ % NUM_USERGART_ENTRIES;
return 0;
}
@@ -596,6 +618,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
return 0;
}
+/* -----------------------------------------------------------------------------
+ * Dumb Buffers
+ */
/**
* omap_gem_dumb_create - create a dumb buffer
@@ -653,6 +678,7 @@ fail:
return ret;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
* for console.
*
@@ -689,6 +715,22 @@ fail:
return ret;
}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Memory Management & DMA Sync
+ */
+
+/**
+ * shmem buffers that are mapped cached can simulate coherency by using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ return is_shmem(obj) &&
+ ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}
/* Sync the buffer for CPU access.. note pages should already be
* attached, ie. omap_gem_get_pages()
@@ -865,28 +907,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
return ret;
}
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
-
- if (is_shmem(obj) && !omap_obj->pages) {
- ret = omap_gem_attach_pages(obj);
- if (ret) {
- dev_err(obj->dev->dev, "could not attach pages\n");
- return ret;
- }
- }
-
- /* TODO: even phys-contig.. we should have a list of pages? */
- *pages = omap_obj->pages;
-
- return 0;
-}
-
/* if !remap, and we don't have pages backing, then fail, rather than
* increasing the pin count (which we don't really do yet anyways,
* because we don't support swapping pages back out). And 'remap'
@@ -924,6 +944,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
* exists for omap_fbdev. This should be called with struct_mutex
* held.
@@ -942,6 +963,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
}
return omap_obj->vaddr;
}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
@@ -971,6 +997,10 @@ int omap_gem_resume(struct device *dev)
}
#endif
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
@@ -1017,9 +1047,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
-/* Buffer Synchronization:
+/* -----------------------------------------------------------------------------
+ * Buffer Synchronization
*/
+static DEFINE_SPINLOCK(sync_lock);
+
struct omap_gem_sync_waiter {
struct list_head list;
struct omap_gem_object *omap_obj;
@@ -1265,6 +1298,10 @@ unlock:
return ret;
}
+/* -----------------------------------------------------------------------------
+ * Constructor & Destructor
+ */
+
/* don't call directly.. called from GEM core when it is time to actually
* free the object..
*/
@@ -1282,8 +1319,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
list_del(&omap_obj->mm_list);
spin_unlock(&priv->list_lock);
- drm_gem_free_mmap_offset(obj);
-
/* this means the object is still pinned.. which really should
* not happen. I think..
*/
@@ -1308,31 +1343,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
drm_gem_object_release(obj);
- kfree(obj);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = omap_gem_new(dev, gsize, flags);
- if (!obj)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file, obj, handle);
- if (ret) {
- drm_gem_object_release(obj);
- kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
- return ret;
- }
-
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
-
- return 0;
+ kfree(omap_obj);
}
/* GEM buffer object constructor */
@@ -1341,15 +1352,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
- struct drm_gem_object *obj = NULL;
+ struct drm_gem_object *obj;
struct address_space *mapping;
size_t size;
int ret;
if (flags & OMAP_BO_TILED) {
- if (!usergart) {
+ if (!priv->usergart) {
dev_err(dev->dev, "Tiled buffers require DMM\n");
- goto fail;
+ return NULL;
}
/* tiled buffers are always shmem paged backed.. when they are
@@ -1420,16 +1431,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
+ omap_gem_free_object(obj);
+ return NULL;
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = omap_gem_new(dev, gsize, flags);
+ if (!obj)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, obj, handle);
+ if (ret) {
omap_gem_free_object(obj);
+ return ret;
+ }
- return NULL;
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
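
(Side note, not part of this patch: omap_gem_new_handle() backs the driver-specific allocation ioctl, so the userspace path looks roughly like the sketch below, assuming the DRM_IOCTL_OMAP_GEM_NEW UAPI from omap_drm.h.)

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/omap_drm.h>	/* struct drm_omap_gem_new, OMAP_BO_* flags */

/* Illustrative only: allocate a plain (non-tiled) write-combined buffer. */
static int alloc_omap_bo(int drm_fd, uint32_t size_bytes, uint32_t *handle)
{
	struct drm_omap_gem_new req = {
		.size.bytes = size_bytes,
		.flags = OMAP_BO_WC,
	};
	int ret;

	/* ends up in omap_gem_new() + drm_gem_handle_create() above */
	ret = ioctl(drm_fd, DRM_IOCTL_OMAP_GEM_NEW, &req);
	if (ret)
		return ret;

	*handle = req.handle;
	return 0;
}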
-/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_usergart *usergart;
const enum tiler_fmt fmts[] = {
TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
};
@@ -1458,10 +1495,11 @@ void omap_gem_init(struct drm_device *dev)
usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
- struct usergart_entry *entry = &usergart[i].entry[j];
- struct tiler_block *block =
- tiler_reserve_2d(fmts[i], w, h,
- PAGE_SIZE);
+ struct omap_drm_usergart_entry *entry;
+ struct tiler_block *block;
+
+ entry = &usergart[i].entry[j];
+ block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
if (IS_ERR(block)) {
dev_err(dev->dev,
"reserve failed: %d, %d, %ld\n",
@@ -1477,13 +1515,16 @@ void omap_gem_init(struct drm_device *dev)
}
}
+ priv->usergart = usergart;
priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
+ struct omap_drm_private *priv = dev->dev_private;
+
/* I believe we can rely on there being no more outstanding GEM
* objects which could depend on usergart/dmm at this point.
*/
- kfree(usergart);
+ kfree(priv->usergart);
}