author     Thomas Hellstrom <thellstrom@vmware.com>    2013-12-18 17:13:29 +0400
committer  Thomas Hellstrom <thellstrom@vmware.com>    2014-01-08 13:11:57 +0400
commit     05efb1abecce6e36457ae1a7be29ded7ac52292a (patch)
tree       59be0d416b8811b273bc1b64f34fa4f0f4cb7b04 /drivers/gpu
parent     859ae233cd0ee76b6143f948ba1cb6b0b4c342f8 (diff)
download   linux-05efb1abecce6e36457ae1a7be29ded7ac52292a.tar.xz
drm/ttm: ttm object security fixes for render nodes
When a client looks up a ttm object, don't look it up through the device hash
table but rather through the file hash table. That makes sure that the client
has indeed put a reference on the object or, in gem terms, has opened the
object, either using prime or using the global "name".
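For orientation, the lookup path this patch introduces boils down to roughly the sketch below (a condensed, illustrative version of the ttm_object.c change in the diff; the helper name lookup_via_file_table is not in the patch). Because the handle is resolved against the caller's own ref_hash, a handle the client never opened is simply not found.

static struct ttm_base_object *
lookup_via_file_table(struct ttm_object_file *tfile, uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;

        rcu_read_lock();
        /* Only handles this tfile has referenced live in ref_hash[TTM_REF_USAGE]. */
        if (drm_ht_find_item_rcu(&tfile->ref_hash[TTM_REF_USAGE], key, &hash) == 0) {
                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
                /* The ref may be mid-teardown; only hand the object back if still live. */
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();
        return base;
}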
To avoid a performance loss, make sure the file hash table entries can be
looked up under an RCU read lock and, as a consequence, replace the rwlock
with a spinlock, since the lock never needs to be taken in read-only mode
anymore.
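The resulting locking discipline is the usual RCU split, sketched below with illustrative helpers (ref_insert()/ref_remove() are not functions in the patch; the real code open-codes this in ttm_ref_object_add() and ttm_ref_object_release()): readers run lockless under rcu_read_lock() and validate with kref_get_unless_zero(), while writers serialize on the per-file spinlock and defer frees through the new rcu_head with kfree_rcu().

static int ref_insert(struct ttm_object_file *tfile, struct drm_open_hash *ht,
                      struct ttm_ref_object *ref)
{
        int ret;

        spin_lock(&tfile->lock);        /* writers serialize on the spinlock */
        ret = drm_ht_insert_item_rcu(ht, &ref->hash);
        spin_unlock(&tfile->lock);
        return ret;
}

static void ref_remove(struct ttm_object_file *tfile, struct drm_open_hash *ht,
                       struct ttm_ref_object *ref)
{
        spin_lock(&tfile->lock);
        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        spin_unlock(&tfile->lock);
        /* Concurrent RCU readers may still see the entry; defer the free. */
        kfree_rcu(ref, rcu_head);
}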
Finally, add a ttm object lookup function for the device hash table that is
intended to be used when we put a ref object on a base object or, in gem
terms, when we open the object.
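In practice an "open"/reference ioctl is expected to pair that device-table lookup with ttm_ref_object_add() and then drop its own reference again, after which every ordinary lookup goes through the file table. A hypothetical caller (modelled on the vmw_surface_reference_ioctl() change in the diff below; example_open_object is not part of the patch) might look like:

static int example_open_object(struct ttm_object_device *tdev,
                               struct ttm_object_file *tfile, uint32_t handle)
{
        struct ttm_base_object *base;
        int ret;

        /* Device hash table: only for establishing the per-file reference. */
        base = ttm_base_object_lookup_for_ref(tdev, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        /* Fails with -EPERM when the object is neither ours nor shareable. */
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
        ttm_base_object_unref(&base);
        return ret;
}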
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_object.c         | 90
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_fence.c    |  3
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  |  3
3 files changed, 58 insertions, 38 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92a82d1..37079859afc8 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -68,7 +68,7 @@ struct ttm_object_file {
         struct ttm_object_device *tdev;
-        rwlock_t lock;
+        spinlock_t lock;
         struct list_head ref_list;
         struct drm_open_hash ref_hash[TTM_REF_NUM];
         struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
  */
 
 struct ttm_ref_object {
+        struct rcu_head rcu_head;
         struct drm_hash_item hash;
         struct list_head head;
         struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
          * call_rcu() or ttm_base_object_kfree().
          */
 
-        if (base->refcount_release) {
-                ttm_object_file_unref(&base->tfile);
+        ttm_object_file_unref(&base->tfile);
+        if (base->refcount_release)
                 base->refcount_release(&base);
-        }
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                                uint32_t key)
 {
-        struct ttm_object_device *tdev = tfile->tdev;
-        struct ttm_base_object *uninitialized_var(base);
+        struct ttm_base_object *base = NULL;
         struct drm_hash_item *hash;
+        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
         int ret;
 
         rcu_read_lock();
-        ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+        ret = drm_ht_find_item_rcu(ht, key, &hash);
 
         if (likely(ret == 0)) {
-                base = drm_hash_entry(hash, struct ttm_base_object, hash);
-                ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+                if (!kref_get_unless_zero(&base->refcount))
+                        base = NULL;
         }
         rcu_read_unlock();
 
-        if (unlikely(ret != 0))
-                return NULL;
+        return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
 
-        if (tfile != base->tfile && !base->shareable) {
-                pr_err("Attempted access of non-shareable object\n");
-                ttm_base_object_unref(&base);
-                return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+        struct ttm_base_object *base = NULL;
+        struct drm_hash_item *hash;
+        struct drm_open_hash *ht = &tdev->object_hash;
+        int ret;
+
+        rcu_read_lock();
+        ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+        if (likely(ret == 0)) {
+                base = drm_hash_entry(hash, struct ttm_base_object, hash);
+                if (!kref_get_unless_zero(&base->refcount))
+                        base = NULL;
         }
+        rcu_read_unlock();
 
         return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                        struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
         struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
         int ret = -EINVAL;
 
+        if (base->tfile != tfile && !base->shareable)
+                return -EPERM;
+
         if (existed != NULL)
                 *existed = true;
 
         while (ret == -EINVAL) {
-                read_lock(&tfile->lock);
-                ret = drm_ht_find_item(ht, base->hash.key, &hash);
+                rcu_read_lock();
+                ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
                 if (ret == 0) {
                         ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-                        kref_get(&ref->kref);
-                        read_unlock(&tfile->lock);
-                        break;
+                        if (kref_get_unless_zero(&ref->kref)) {
+                                rcu_read_unlock();
+                                break;
+                        }
                 }
 
-                read_unlock(&tfile->lock);
+                rcu_read_unlock();
                 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                            false, false);
                 if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                 ref->ref_type = ref_type;
                 kref_init(&ref->kref);
 
-                write_lock(&tfile->lock);
-                ret = drm_ht_insert_item(ht, &ref->hash);
+                spin_lock(&tfile->lock);
+                ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
                 if (likely(ret == 0)) {
                         list_add_tail(&ref->head, &tfile->ref_list);
                         kref_get(&base->refcount);
-                        write_unlock(&tfile->lock);
+                        spin_unlock(&tfile->lock);
                         if (existed != NULL)
                                 *existed = false;
                         break;
                 }
 
-                write_unlock(&tfile->lock);
+                spin_unlock(&tfile->lock);
                 BUG_ON(ret != -EINVAL);
                 ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
         struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
         ht = &tfile->ref_hash[ref->ref_type];
-        (void)drm_ht_remove_item(ht, &ref->hash);
+        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
         list_del(&ref->head);
-        write_unlock(&tfile->lock);
+        spin_unlock(&tfile->lock);
 
         if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                 base->ref_obj_release(base, ref->ref_type);
 
         ttm_base_object_unref(&ref->obj);
         ttm_mem_global_free(mem_glob, sizeof(*ref));
-        kfree(ref);
-        write_lock(&tfile->lock);
+        kfree_rcu(ref, rcu_head);
+        spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
         struct drm_hash_item *hash;
         int ret;
 
-        write_lock(&tfile->lock);
+        spin_lock(&tfile->lock);
         ret = drm_ht_find_item(ht, key, &hash);
         if (unlikely(ret != 0)) {
-                write_unlock(&tfile->lock);
+                spin_unlock(&tfile->lock);
                 return -EINVAL;
         }
         ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
         kref_put(&ref->kref, ttm_ref_object_release);
-        write_unlock(&tfile->lock);
+        spin_unlock(&tfile->lock);
         return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
         struct ttm_object_file *tfile = *p_tfile;
 
         *p_tfile = NULL;
-        write_lock(&tfile->lock);
+        spin_lock(&tfile->lock);
 
         /*
          * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
         for (i = 0; i < TTM_REF_NUM; ++i)
                 drm_ht_remove(&tfile->ref_hash[i]);
 
-        write_unlock(&tfile->lock);
+        spin_unlock(&tfile->lock);
         ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
         if (unlikely(tfile == NULL))
                 return NULL;
 
-        rwlock_init(&tfile->lock);
+        spin_lock_init(&tfile->lock);
         tfile->tdev = tdev;
         kref_init(&tfile->refcount);
         INIT_LIST_HEAD(&tfile->ref_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e8a6f1..5d3f67768524 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
          */
         if (arg->handle) {
                 struct ttm_base_object *base =
-                        ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+                        ttm_base_object_lookup_for_ref(dev_priv->tdev,
+                                                       arg->handle);
 
                 if (unlikely(base == NULL)) {
                         DRM_ERROR("Fence event invalid fence object handle "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd553..0fc93398bba2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -843,6 +843,7 @@ out_unlock:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
+        struct vmw_private *dev_priv = vmw_priv(dev);
         union drm_vmw_surface_reference_arg *arg =
             (union drm_vmw_surface_reference_arg *)data;
         struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +855,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
         struct ttm_base_object *base;
         int ret = -EINVAL;
 
-        base = ttm_base_object_lookup(tfile, req->sid);
+        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
         if (unlikely(base == NULL)) {
                 DRM_ERROR("Could not find surface to reference.\n");
                 return -EINVAL;