author     Christian König <christian.koenig@amd.com>   2020-10-06 18:26:42 +0300
committer  Christian König <christian.koenig@amd.com>   2021-03-24 19:05:25 +0300
commit     a1f091f8ef2b680a5184db065527612247cb4cae (patch)
tree       656a35294d816b5334ca4b4bfdca4c1a87a2aa3d /include/drm
parent     f9e2a03e110ad0c78e69201f59d18dc1c487efac (diff)
download   linux-a1f091f8ef2b680a5184db065527612247cb4cae.tar.xz
drm/ttm: switch to per device LRU lock
Use one LRU lock per device instead of a single global lock, for potentially less contention. (A minimal sketch of the pattern follows the tags below.)
Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/424010/
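
As referenced above, here is a minimal, self-contained sketch of the per-device locking pattern this patch adopts. It is illustrative only; the demo_* names are hypothetical and not TTM code. Each device owns its own spinlock and LRU list, so objects on different devices no longer serialize on one global lock.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical stand-in for a per-device structure such as ttm_device. */
struct demo_device {
	spinlock_t lru_lock;	/* per device; previously one global lock */
	struct list_head lru;
};

static void demo_device_init(struct demo_device *dev)
{
	spin_lock_init(&dev->lru_lock);
	INIT_LIST_HEAD(&dev->lru);
}

static void demo_move_to_lru_tail(struct demo_device *dev,
				  struct list_head *entry)
{
	/* Only objects belonging to the same device contend here. */
	spin_lock(&dev->lru_lock);
	list_move_tail(entry, &dev->lru);
	spin_unlock(&dev->lru_lock);
}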
Diffstat (limited to 'include/drm')
-rw-r--r--   include/drm/ttm/ttm_bo_driver.h | 4 ++--
-rw-r--r--   include/drm/ttm/ttm_device.h    | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d007feef7676..dbccac957f8f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -180,9 +180,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 static inline void
 ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
 {
-	spin_lock(&ttm_glob.lru_lock);
+	spin_lock(&bo->bdev->lru_lock);
 	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
-	spin_unlock(&ttm_glob.lru_lock);
+	spin_unlock(&bo->bdev->lru_lock);
 }
 
 static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 93f3b59755ac..7c8f87bd52d3 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -56,7 +56,6 @@ extern struct ttm_global {
 	 */
 	struct page *dummy_read_page;
 
-	spinlock_t lru_lock;
 
 	/**
 	 * Protected by ttm_global_mutex.
@@ -277,8 +276,9 @@ struct ttm_device {
 	struct ttm_pool pool;
 
 	/*
-	 * Protected by the global:lru lock.
+	 * Protection for the per manager LRU and ddestroy lists.
 	 */
+	spinlock_t lru_lock;
 	struct list_head ddestroy;
 
 	/*
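
For callers, the visible change is simply which lock guards LRU manipulation. A hedged before/after sketch built from the identifiers in the diff above (bo is a struct ttm_buffer_object *, as in ttm_bo_move_to_lru_tail_unlocked()):

	/* Before: every device serialized on the single global lock. */
	spin_lock(&ttm_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
	spin_unlock(&ttm_glob.lru_lock);

	/* After: only buffers of the same device contend. */
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
	spin_unlock(&bo->bdev->lru_lock);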