author    | Christian König <christian.koenig@amd.com> | 2016-04-06 12:12:04 +0300
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-05-05 03:21:26 +0300
commit    | 8aa6d4fc5f470c5e4363c705bbae96ccb1b033b0 (patch)
tree      | 79478c3db6509e6f8da3a5d3624602b7d022bacb /drivers/gpu
parent    | dfd5e50ea43ca4a89de061fb69618299760eb682 (diff)
download  | linux-8aa6d4fc5f470c5e4363c705bbae96ccb1b033b0.tar.xz
drm/ttm: remove lazy parameter from ttm_bo_wait
Not used any more.
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
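
The change itself is mechanical: the unused lazy flag is dropped from ttm_bo_wait() and every caller loses its first boolean argument. A minimal before/after sketch of the signature (the definition is taken from the ttm_bo.c hunk below; the matching declaration presumably lives in include/drm/ttm/ttm_bo_api.h, which falls outside the drivers/gpu diffstat shown here):

/* Before: the "lazy" flag was accepted but never used. */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait);

/* After: only the parameters that are actually honoured remain. */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait);

/* Typical call-site update, repeated across the drivers in the diff below: */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);	/* was: ttm_bo_wait(bo, true, intr, no_wait_gpu) */
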
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r-- | drivers/gpu/drm/qxl/qxl_cmd.c | 2
-rw-r--r-- | drivers/gpu/drm/qxl/qxl_object.h | 2
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 16
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
-rw-r--r-- | drivers/gpu/drm/virtio/virtgpu_object.c | 2
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
12 files changed, 23 insertions, 23 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ea8928671632..5fe500033a95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1322,7 +1322,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}
 	/* Fallback to software copy. */
-	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8d64b65770e0..185aaaa0c85d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -126,7 +126,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	list_del(&vma->head);
 	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, true, false, false);
+		ttm_bo_wait(&nvbo->bo, false, false);
 	else if (fobj && fobj->shared_count == 1)
 		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
@@ -651,7 +651,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 			data |= r->vor;
 		}
-		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
+		ret = ttm_bo_wait(&nvbo->bo, false, false);
 		if (ret) {
 			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index fdc1833b1af8..b5d4b41361bd 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -624,7 +624,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
 	if (stall)
 		mutex_unlock(&qdev->surf_evict_mutex);
-	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+	ret = ttm_bo_wait(&surf->tbo, true, !stall);
 	if (stall)
 		mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 483f131cefdf..4d8311373ba3 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -79,7 +79,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7e0c16c74cf3..be30861afae9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -838,7 +838,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 75f04b5f8c09..2631a4d25622 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -455,7 +455,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	ret = __ttm_bo_reserve(bo, false, true, NULL);
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, false, true)) {
+		if (!ttm_bo_wait(bo, false, true)) {
 			put_count = ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
@@ -508,7 +508,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	int put_count;
 	int ret;
-	ret = ttm_bo_wait(bo, false, false, true);
+	ret = ttm_bo_wait(bo, false, true);
 	if (ret && !no_wait_gpu) {
 		long lret;
@@ -545,7 +545,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
-		ret = ttm_bo_wait(bo, false, false, true);
+		ret = ttm_bo_wait(bo, false, true);
		WARN_ON(ret);
	}
@@ -684,7 +684,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	struct ttm_placement placement;
	int ret = 0;
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
@@ -1006,7 +1006,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		 * Have the driver move function wait for idle when necessary,
		 * instead of doing it here.
		 */
-		ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
		if (ret)
			return ret;
	}
@@ -1567,7 +1567,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 int ttm_bo_wait(struct ttm_buffer_object *bo,
-		bool lazy, bool interruptible, bool no_wait)
+		bool interruptible, bool no_wait)
 {
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
@@ -1625,7 +1625,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
-	ret = ttm_bo_wait(bo, false, true, no_wait);
+	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
@@ -1682,7 +1682,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
	 * Wait for GPU, then move to system cached.
	 */
-	ret = ttm_bo_wait(bo, false, false, false);
+	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ac6fe40b99f7..d9831559706e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -645,7 +645,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
-		ret = ttm_bo_wait(bo, false, false, false);
+		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index dbd8d58cbc7d..3216878bced3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
	/*
	 * Quick non-stalling check for idle.
	 */
-	ret = ttm_bo_wait(bo, false, false, true);
+	ret = ttm_bo_wait(bo, false, true);
	if (likely(ret == 0))
		goto out_unlock;
@@ -68,14 +68,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
			goto out_unlock;
		up_read(&vma->vm_mm->mmap_sem);
-		(void) ttm_bo_wait(bo, false, true, false);
+		(void) ttm_bo_wait(bo, true, false);
		goto out_unlock;
	}
	/*
	 * Ordinary wait.
	 */
-	ret = ttm_bo_wait(bo, false, true, false);
+	ret = ttm_bo_wait(bo, true, false);
	if (unlikely(ret != 0))
		ret = (ret != -ERESTARTSYS) ?
			VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 208a1fdb21f4..1483daebe057 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -158,7 +158,7 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 3329f623c8bf..e09423d75b6b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -839,7 +839,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
 */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-	ttm_bo_wait(bo, false, false, false);
+	ttm_bo_wait(bo, false, false);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ddb3dd997437..265c81e6cf39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -423,7 +423,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
-	ret = ttm_bo_wait(old_bo, false, false, false);
+	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9608d33a9fc4..6a328d507a28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1512,7 +1512,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
		list_del_init(&res->mob_head);
	}
-	(void) ttm_bo_wait(bo, false, false, false);
+	(void) ttm_bo_wait(bo, false, false);
 }
@@ -1605,7 +1605,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);
-		(void) ttm_bo_wait(bo, false, false, false);
+		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);