author    | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-10-27 21:00:19 +0300
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-10-27 21:00:19 +0300
commit    | 728d90bdc9e480dc93913e59a0aa3c896c7aa697 (patch)
tree      | 258b1b6ee711f0ef67fd225700d84eccec285194 /drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
parent    | cb3efd5a38855eabd26c2b631dd027169678d60f (diff)
parent    | d6d5df1db6e9d7f8f76d2911707f7d5877251b02 (diff)
download  | linux-728d90bdc9e480dc93913e59a0aa3c896c7aa697.tar.xz
Merge tag 'v5.4-rc5' into next
Sync up with mainline.
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b4f6e1217c9d..3ca5cf375b01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -116,6 +116,8 @@ static const struct vmw_res_func vmw_cotable_func = {
 	.res_type = vmw_res_cotable,
 	.needs_backup = true,
 	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
 	.type_name = "context guest backed object tables",
 	.backup_placement = &vmw_mob_placement,
 	.create = vmw_cotable_create,
@@ -169,7 +171,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 	} *cmd;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	lockdep_assert_held(&bo->resv->lock.base);
+	dma_resv_assert_held(bo->base.resv);
 
 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)
@@ -307,11 +309,11 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
 
-	if (list_empty(&res->mob_head))
+	if (!vmw_resource_mob_attached(res))
 		return 0;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	lockdep_assert_held(&bo->resv->lock.base);
+	dma_resv_assert_held(bo->base.resv);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	if (!vcotbl->scrubbed)
@@ -453,6 +455,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 		goto out_wait;
 	}
 
+	vmw_resource_mob_detach(res);
 	res->backup = buf;
 	res->backup_size = new_size;
 	vcotbl->size_read_back = cur_size_read_back;
@@ -467,12 +470,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 		res->backup = old_buf;
 		res->backup_size = old_size;
 		vcotbl->size_read_back = old_size_read_back;
+		vmw_resource_mob_attach(res);
 		goto out_wait;
 	}
 
+	vmw_resource_mob_attach(res);
 	/* Let go of the old mob. */
-	list_del(&res->mob_head);
-	list_add_tail(&res->mob_head, &buf->res_list);
 	vmw_bo_unreference(&old_buf);
 
 	res->id = vcotbl->type;
@@ -496,7 +499,7 @@ out_wait:
  * is called before bind() in the validation sequence is instead used for two
  * things.
  * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- *    buffer, that is, if @res->mob_head is non-empty.
+ *    buffer.
  * 2) Resize the cotable if needed.
  */
 static int vmw_cotable_create(struct vmw_resource *res)
@@ -512,7 +515,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
 		new_size *= 2;
 
 	if (likely(new_size <= res->backup_size)) {
-		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
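The resize and create paths above stop open-coding list operations on res->mob_head and instead call vmw_resource_mob_attached(), vmw_resource_mob_attach() and vmw_resource_mob_detach(). As a rough illustration of that encapsulation pattern, here is a minimal sketch of what such helpers could look like if attachment were still tracked with a plain list head; the struct layout and the sketch_* names are assumptions for this example, not the actual vmwgfx implementation.

```c
/*
 * Illustrative sketch only: wrapping the old list_empty()/list_del()/
 * list_add_tail() handling of res->mob_head behind attach/detach helpers,
 * in the spirit of the diff above. Struct layout and names are assumed
 * for this example and do not come from the real vmwgfx headers.
 */
#include <linux/list.h>
#include <linux/types.h>

struct sketch_buffer_object {
	struct list_head res_list;		/* resources backed by this MOB */
};

struct sketch_resource {
	/* mob_head must be set up with INIT_LIST_HEAD() at init time. */
	struct list_head mob_head;		/* entry on the backup buffer's res_list */
	struct sketch_buffer_object *backup;	/* current backup buffer, if any */
};

/* True if the resource is currently attached to its backup MOB. */
static inline bool sketch_resource_mob_attached(const struct sketch_resource *res)
{
	return !list_empty(&res->mob_head);
}

/* Attach the resource to its backup buffer's resource list. */
static inline void sketch_resource_mob_attach(struct sketch_resource *res)
{
	list_add_tail(&res->mob_head, &res->backup->res_list);
}

/* Detach the resource; list_del_init() keeps the head safe to re-test. */
static inline void sketch_resource_mob_detach(struct sketch_resource *res)
{
	if (sketch_resource_mob_attached(res))
		list_del_init(&res->mob_head);
}
```

Centralizing attach/detach like this gives the driver a single place to hang any later bookkeeping (for example, dirty tracking or eviction-priority updates) instead of touching every open-coded list manipulation at each call site.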