Diffstat (limited to 'drivers/gpu/drm/qxl/qxl_object.c')
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c  57
1 file changed, 49 insertions, 8 deletions
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ceebc5881f68..6e26d70f2f07 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -29,6 +29,9 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
+static int __qxl_bo_pin(struct qxl_bo *bo);
+static void __qxl_bo_unpin(struct qxl_bo *bo);
+
 static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct qxl_bo *bo;
@@ -103,8 +106,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
 	.print_info = drm_gem_ttm_print_info,
 };
 
-int qxl_bo_create(struct qxl_device *qdev,
-		  unsigned long size, bool kernel, bool pinned, u32 domain,
+int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
+		  bool kernel, bool pinned, u32 domain, u32 priority,
 		  struct qxl_surface *surf,
 		  struct qxl_bo **bo_ptr)
 {
@@ -137,9 +140,10 @@ int qxl_bo_create(struct qxl_device *qdev,
 
 	qxl_ttm_placement_from_domain(bo, domain);
 
+	bo->tbo.priority = priority;
 	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
-				 &bo->placement, 0, &ctx, size,
-				 NULL, NULL, &qxl_ttm_bo_destroy);
+				 &bo->placement, 0, &ctx, NULL, NULL,
+				 &qxl_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(qdev->ddev.dev,
@@ -154,10 +158,12 @@ int qxl_bo_create(struct qxl_device *qdev,
 	return 0;
 }
 
-int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map)
+int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map)
 {
 	int r;
 
+	dma_resv_assert_held(bo->tbo.base.resv);
+
 	if (bo->kptr) {
 		bo->map_count++;
 		goto out;
@@ -178,6 +184,25 @@ out:
 	return 0;
 }
 
+int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map)
+{
+	int r;
+
+	r = qxl_bo_reserve(bo);
+	if (r)
+		return r;
+
+	r = __qxl_bo_pin(bo);
+	if (r) {
+		qxl_bo_unreserve(bo);
+		return r;
+	}
+
+	r = qxl_bo_vmap_locked(bo, map);
+	qxl_bo_unreserve(bo);
+	return r;
+}
+
 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 			      struct qxl_bo *bo, int page_offset)
 {
@@ -202,7 +227,7 @@ fallback:
 		return rptr;
 	}
 
-	ret = qxl_bo_kmap(bo, &bo_map);
+	ret = qxl_bo_vmap_locked(bo, &bo_map);
 	if (ret)
 		return NULL;
 	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -211,8 +236,10 @@ fallback:
 	return rptr;
 }
 
-void qxl_bo_kunmap(struct qxl_bo *bo)
+void qxl_bo_vunmap_locked(struct qxl_bo *bo)
 {
+	dma_resv_assert_held(bo->tbo.base.resv);
+
 	if (bo->kptr == NULL)
 		return;
 	bo->map_count--;
@@ -222,6 +249,20 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
 	ttm_bo_vunmap(&bo->tbo, &bo->map);
 }
 
+int qxl_bo_vunmap(struct qxl_bo *bo)
+{
+	int r;
+
+	r = qxl_bo_reserve(bo);
+	if (r)
+		return r;
+
+	qxl_bo_vunmap_locked(bo);
+	__qxl_bo_unpin(bo);
+	qxl_bo_unreserve(bo);
+	return 0;
+}
+
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 			       struct qxl_bo *bo, void *pmap)
 {
@@ -232,7 +273,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 	io_mapping_unmap_atomic(pmap);
 	return;
  fallback:
-	qxl_bo_kunmap(bo);
+	qxl_bo_vunmap_locked(bo);
 }
 
 void qxl_bo_unref(struct qxl_bo **bo)
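
For orientation (not part of the patch): a minimal caller-side sketch of the vmap interface this diff introduces. qxl_bo_vmap() reserves the object, pins it via __qxl_bo_pin() and maps it, while qxl_bo_vunmap() reverses all three steps. The function names and struct dma_buf_map come from the diff above; the surrounding caller code and variable names are hypothetical.

	/* hypothetical caller: 'bo' is an already created struct qxl_bo */
	struct dma_buf_map map;
	int ret;

	ret = qxl_bo_vmap(bo, &map);	/* reserve + __qxl_bo_pin + vmap_locked */
	if (ret)
		return ret;

	/* the BO may live in VRAM: check map.is_iomem before touching map.vaddr */

	qxl_bo_vunmap(bo);		/* vunmap_locked + __qxl_bo_unpin + unreserve */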
