Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_shrinker.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem_shrinker.c	| 166
1 file changed, 116 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 9d5248be746f..1187ecf9d647 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -9,57 +9,140 @@
 #include "msm_gpu.h"
 #include "msm_gpu_trace.h"
 
+/* Default disabled for now until it has some more testing on the different
+ * iommu combinations that can be paired with the driver:
+ */
+bool enable_eviction = false;
+MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
+module_param(enable_eviction, bool, 0600);
+
+static bool can_swap(void)
+{
+	return enable_eviction && get_nr_swap_pages() > 0;
+}
+
 static unsigned long
 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	struct msm_gem_object *msm_obj;
-	unsigned long count = 0;
+	unsigned count = priv->shrinkable_count;
 
-	mutex_lock(&priv->mm_lock);
+	if (can_swap())
+		count += priv->evictable_count;
 
-	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
-		if (!msm_gem_trylock(&msm_obj->base))
-			continue;
-		if (is_purgeable(msm_obj))
-			count += msm_obj->base.size >> PAGE_SHIFT;
-		msm_gem_unlock(&msm_obj->base);
-	}
+	return count;
+}
 
-	mutex_unlock(&priv->mm_lock);
+static bool
+purge(struct msm_gem_object *msm_obj)
+{
+	if (!is_purgeable(msm_obj))
+		return false;
 
-	return count;
+	/*
+	 * This will move the obj out of still_in_list to
+	 * the purged list
+	 */
+	msm_gem_purge(&msm_obj->base);
+
+	return true;
+}
+
+static bool
+evict(struct msm_gem_object *msm_obj)
+{
+	if (is_unevictable(msm_obj))
+		return false;
+
+	msm_gem_evict(&msm_obj->base);
+
+	return true;
 }
 
 static unsigned long
-msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
+		bool (*shrink)(struct msm_gem_object *msm_obj))
 {
-	struct msm_drm_private *priv =
-		container_of(shrinker, struct msm_drm_private, shrinker);
-	struct msm_gem_object *msm_obj;
-	unsigned long freed = 0;
+	unsigned freed = 0;
+	struct list_head still_in_list;
+
+	INIT_LIST_HEAD(&still_in_list);
 
 	mutex_lock(&priv->mm_lock);
 
-	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
-		if (freed >= sc->nr_to_scan)
+	while (freed < nr_to_scan) {
+		struct msm_gem_object *msm_obj = list_first_entry_or_null(
+				list, typeof(*msm_obj), mm_list);
+
+		if (!msm_obj)
 			break;
-		if (!msm_gem_trylock(&msm_obj->base))
+
+		list_move_tail(&msm_obj->mm_list, &still_in_list);
+
+		/*
+		 * If it is in the process of being freed, msm_gem_free_object
+		 * can be blocked on mm_lock waiting to remove it.  So just
+		 * skip it.
+		 */
+		if (!kref_get_unless_zero(&msm_obj->base.refcount))
 			continue;
-		if (is_purgeable(msm_obj)) {
-			msm_gem_purge(&msm_obj->base);
+
+		/*
+		 * Now that we own a reference, we can drop mm_lock for the
+		 * rest of the loop body, to reduce contention with the
+		 * retire_submit path (which could make more objects purgeable)
+		 */
+
+		mutex_unlock(&priv->mm_lock);
+
+		/*
+		 * Note that this still needs to be trylock, since we can
+		 * hit shrinker in response to trying to get backing pages
+		 * for this obj (ie. while its lock is already held)
+		 */
+		if (!msm_gem_trylock(&msm_obj->base))
+			goto tail;
+
+		if (shrink(msm_obj))
 			freed += msm_obj->base.size >> PAGE_SHIFT;
-		}
+
 		msm_gem_unlock(&msm_obj->base);
+
+tail:
+		drm_gem_object_put(&msm_obj->base);
+		mutex_lock(&priv->mm_lock);
 	}
 
+	list_splice_tail(&still_in_list, list);
 	mutex_unlock(&priv->mm_lock);
 
+	return freed;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct msm_drm_private *priv =
+		container_of(shrinker, struct msm_drm_private, shrinker);
+	unsigned long freed;
+
+	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
+
 	if (freed > 0)
 		trace_msm_gem_purge(freed << PAGE_SHIFT);
 
-	return freed;
+	if (can_swap() && freed < sc->nr_to_scan) {
+		int evicted = scan(priv, sc->nr_to_scan - freed,
+				&priv->inactive_willneed, evict);
+
+		if (evicted > 0)
+			trace_msm_gem_evict(evicted << PAGE_SHIFT);
+
+		freed += evicted;
+	}
+
+	return (freed > 0) ? freed : SHRINK_STOP;
 }
 
 /* since we don't know any better, lets bail after a few
@@ -68,26 +151,15 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
  */
 static const int vmap_shrink_limit = 15;
 
-static unsigned
-vmap_shrink(struct list_head *mm_list)
+static bool
+vmap_shrink(struct msm_gem_object *msm_obj)
 {
-	struct msm_gem_object *msm_obj;
-	unsigned unmapped = 0;
+	if (!is_vunmapable(msm_obj))
+		return false;
 
-	list_for_each_entry(msm_obj, mm_list, mm_list) {
-		if (!msm_gem_trylock(&msm_obj->base))
-			continue;
-		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base);
-			unmapped++;
-		}
-		msm_gem_unlock(&msm_obj->base);
+	msm_gem_vunmap(&msm_obj->base);
 
-		if (++unmapped >= vmap_shrink_limit)
-			break;
-	}
-
-	return unmapped;
+	return true;
 }
 
 static int
@@ -103,17 +175,11 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	};
 	unsigned idx, unmapped = 0;
 
-	mutex_lock(&priv->mm_lock);
-
-	for (idx = 0; mm_lists[idx]; idx++) {
-		unmapped += vmap_shrink(mm_lists[idx]);
-
-		if (unmapped >= vmap_shrink_limit)
-			break;
+	for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
+		unmapped += scan(priv, vmap_shrink_limit - unmapped,
+				mm_lists[idx], vmap_shrink);
 	}
 
-	mutex_unlock(&priv->mm_lock);
-
 	*(unsigned long *)ptr += unmapped;
 
 	if (unmapped > 0)
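
The core of this change is the new scan() helper: instead of walking a list with trylock and skipping contended objects, each visited object is parked on a private still_in_list (so one pass never revisits it), pinned with kref_get_unless_zero() so a concurrent free cannot race with the shrinker, and then processed with mm_lock dropped to reduce contention; survivors are spliced back onto the original list at the end. A minimal self-contained sketch of that pattern follows, where struct demo_obj, demo_release() and the shrink_one callback are hypothetical stand-ins, not msm driver API:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	/* Hypothetical stand-in for a shrinkable object: */
	struct demo_obj {
		struct kref refcount;
		struct list_head mm_list;
	};

	static void demo_release(struct kref *kref)
	{
		/* last reference dropped: actually free the object here */
	}

	static unsigned long
	walk_and_shrink(struct mutex *mm_lock, struct list_head *list,
			unsigned long nr_to_scan,
			bool (*shrink_one)(struct demo_obj *obj))
	{
		LIST_HEAD(still_in_list);	/* objects inspected but kept */
		unsigned long freed = 0;

		mutex_lock(mm_lock);
		while (freed < nr_to_scan) {
			struct demo_obj *obj = list_first_entry_or_null(
					list, typeof(*obj), mm_list);

			if (!obj)
				break;

			/* Park the object so this pass never revisits it: */
			list_move_tail(&obj->mm_list, &still_in_list);

			/*
			 * An object whose refcount already hit zero is being
			 * freed (and its free path may be blocked on mm_lock),
			 * so skip it rather than resurrecting it:
			 */
			if (!kref_get_unless_zero(&obj->refcount))
				continue;

			/* With a reference held, the list lock can be dropped: */
			mutex_unlock(mm_lock);

			if (shrink_one(obj))
				freed++;

			kref_put(&obj->refcount, demo_release);
			mutex_lock(mm_lock);
		}
		/* Return survivors to the tail of the original list: */
		list_splice_tail(&still_in_list, list);
		mutex_unlock(mm_lock);

		return freed;
	}

The real scan() additionally guards the per-object work with msm_gem_trylock(), because the shrinker can be entered from an allocation made while that same object's lock is already held.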

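For context, msm_gem_shrinker_count() and msm_gem_shrinker_scan() are the count/scan pair that the kernel's shrinker core invokes under memory pressure: count_objects() must be a cheap estimate (hence the plain shrinkable_count/evictable_count reads above), scan_objects() performs the actual reclaim, and returning SHRINK_STOP tells the core there is nothing left worth scanning this round. A simplified sketch of the registration side, assuming the register_shrinker() and register_vmap_purge_notifier() interfaces of this kernel generation (the actual wiring lives in msm_gem_shrinker_init() in this same file):

	#include <drm/drm_device.h>
	#include <linux/shrinker.h>
	#include <linux/vmalloc.h>

	/* Simplified sketch; the field names mirror struct msm_drm_private. */
	void msm_gem_shrinker_init(struct drm_device *dev)
	{
		struct msm_drm_private *priv = dev->dev_private;

		priv->shrinker.count_objects = msm_gem_shrinker_count;
		priv->shrinker.scan_objects = msm_gem_shrinker_scan;
		priv->shrinker.seeks = DEFAULT_SEEKS;
		WARN_ON(register_shrinker(&priv->shrinker));

		/* msm_gem_shrinker_vmap() runs off the vmap purge notifier chain: */
		priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
		WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
	}

With the module loaded, the new knob added by this patch appears at /sys/module/msm/parameters/enable_eviction (mode 0600), which is how the opt-in described in the comment at the top of the diff is flipped at runtime.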