author     Rob Clark <robdclark@gmail.com>    2016-05-17 23:19:32 +0300
committer  Rob Clark <robdclark@gmail.com>    2016-07-16 17:09:06 +0300
commit     68209390f116034449fa6a3ae03f7b100b3d894a (patch)
tree       00fae111275f1785a910792c21261b6d52876321 /drivers/gpu/drm/msm/msm_gem.c
parent     4fe5f65e66823dcb212a0404af47389b2b1c58f0 (diff)
drm/msm: shrinker support
For a first step, only purge obj->madv==DONTNEED objects. We could be more aggressive and next try unpinning inactive objects, but that is only useful if you have swap.

Signed-off-by: Rob Clark <robdclark@gmail.com>
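For context, the purge path added below is intended to be driven from a shrinker "scan" callback registered by the driver. The following is only a minimal sketch of such a callback; the names msm_gem_shrinker_scan, priv->shrinker, priv->inactive_list and is_purgeable() are assumptions based on the msm driver's conventions and are not part of this hunk:

/* Sketch of a shrinker scan callback that frees DONTNEED objects.
 * Assumes a per-device list of inactive objects and an is_purgeable()
 * predicate; both are hypothetical here, not defined by this patch.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;

	/* msm_gem_purge() requires struct_mutex; bail out rather than
	 * block if we cannot take it from reclaim context.
	 */
	if (!mutex_trylock(&dev->struct_mutex))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return freed;
}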
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--   drivers/gpu/drm/msm/msm_gem.c   32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2636c279d504..444d0b5680f5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -448,6 +448,38 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
return (msm_obj->madv != __MSM_MADV_PURGED);
}
+void msm_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!is_purgeable(msm_obj));
+ WARN_ON(obj->import_attach);
+
+ put_iova(obj);
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+
+ put_pages(obj);
+
+ msm_obj->madv = __MSM_MADV_PURGED;
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+ drm_gem_free_mmap_offset(obj);
+
+ /* Our goal here is to return as much of the memory as
+ * is possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
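The WARN_ON(!is_purgeable(msm_obj)) check above relies on a helper that is not part of this hunk. A plausible definition, consistent with only purging madvise(DONTNEED) objects that still have backing pages and are not imported, could look like the sketch below (field names assumed; the actual helper would live in msm_gem.h):

/* Sketch of the purgeability predicate assumed by msm_gem_purge().
 * Only objects userspace has marked DONTNEED, that actually have
 * backing pages, and that are not imported via dma-buf can be
 * safely dropped.
 */
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!msm_obj->base.import_attach;
}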