diff options
Diffstat (limited to 'drivers/gpu/drm/msm/msm_drv.h')
| -rw-r--r-- | drivers/gpu/drm/msm/msm_drv.h | 29 | 
1 file changed, 22 insertions, 7 deletions
| diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 591c47a654e8..2668941df529 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -174,20 +174,35 @@ struct msm_drm_private {  	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */  	struct msm_perf_state *perf; -	/* -	 * Lists of inactive GEM objects.  Every bo is either in one of the +	/** +	 * List of all GEM objects (mainly for debugfs, protected by obj_lock +	 * (acquire before per GEM object lock) +	 */ +	struct list_head objects; +	struct mutex obj_lock; + +	/** +	 * LRUs of inactive GEM objects.  Every bo is either in one of the  	 * inactive lists (depending on whether or not it is shrinkable) or -	 * gpu->active_list (for the gpu it is active on[1]) +	 * gpu->active_list (for the gpu it is active on[1]), or transiently +	 * on a temporary list as the shrinker is running. +	 * +	 * Note that inactive_willneed also contains pinned and vmap'd bos, +	 * but the number of pinned-but-not-active objects is small (scanout +	 * buffers, ringbuffer, etc).  	 * -	 * These lists are protected by mm_lock.  If struct_mutex is involved, it -	 * should be aquired prior to mm_lock.  One should *not* hold mm_lock in +	 * These lists are protected by mm_lock (which should be acquired +	 * before per GEM object lock).  One should *not* hold mm_lock in  	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.  	 
*  	 * [1] if someone ever added support for the old 2d cores, there could be  	 *     more than one gpu object  	 */ -	struct list_head inactive_willneed;  /* inactive + !shrinkable */ -	struct list_head inactive_dontneed;  /* inactive +  shrinkable */ +	struct list_head inactive_willneed;  /* inactive + potentially unpin/evictable */ +	struct list_head inactive_dontneed;  /* inactive + shrinkable */ +	struct list_head inactive_unpinned;  /* inactive + purged or unpinned */ +	long shrinkable_count;               /* write access under mm_lock */ +	long evictable_count;                /* write access under mm_lock */  	struct mutex mm_lock;  	struct workqueue_struct *wq; | 
