Diffstat (limited to 'drivers/gpu/drm/radeon')
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c      | 10
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c |  2
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_mn.c      |  6
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c  |  4
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c     | 34
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c     |  6
 -rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c      | 15
 7 files changed, 33 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index bd328cb6fa61..6e3d1c8f3483 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -253,11 +253,17 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
int i;
for (i = 0; i < p->nrelocs; i++) {
+ struct reservation_object *resv;
+ struct fence *fence;
+
if (!p->relocs[i].robj)
continue;
+ resv = p->relocs[i].robj->tbo.resv;
+ fence = reservation_object_get_excl(resv);
+
radeon_semaphore_sync_to(p->ib.semaphore,
- p->relocs[i].robj->tbo.sync_obj);
+ (struct radeon_fence *)fence);
}
}
@@ -427,7 +433,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
- parser->ib.fence);
+ &parser->ib.fence->base);
} else if (backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
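Throughout this patch the conversion follows one pattern: the driver-private tbo.sync_obj pointer is replaced by the exclusive fence of the buffer's reservation object, cast to struct radeon_fence. The downcast is only valid while every exclusive fence attached to a radeon BO really is a radeon fence, which holds at this point in the series; both cast directions lean on struct radeon_fence embedding struct fence as its first member. A trimmed sketch (members are elided here, not in the driver):

#include <linux/fence.h>
#include <linux/reservation.h>

struct radeon_fence {
	struct fence base;	/* first member, so the casts below are valid */
	/* driver-private members omitted */
};

static void cast_directions(struct reservation_object *resv,
			    struct radeon_fence *rfence)
{
	/* down: generic fence -> radeon fence, as in radeon_cs_sync_rings();
	 * the caller must hold the reservation while reading the fence */
	struct radeon_fence *rf =
		(struct radeon_fence *)reservation_object_get_excl(resv);

	/* up: radeon fence -> generic fence, as in radeon_cs_parser_fini() */
	struct fence *f = &rfence->base;

	(void)rf; (void)f;
}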
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7d0a7abdab2a..bc894c17b2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -494,7 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+ work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
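Unlike the CS path, the flip worker keeps the fence beyond the reservation, so it must take its own reference before the BO is unreserved; fence_get() tolerates a NULL exclusive fence. The lifetime, sketched with the names from the hunk above:

	/* new_rbo is reserved here, so the exclusive fence cannot change */
	work->fence = (struct radeon_fence *)
		fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
	radeon_bo_unreserve(new_rbo);
	/* ... the flip worker runs and waits on work->fence ... */
	radeon_fence_unref(&work->fence);	/* drops our reference */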
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 0157bc2f11f8..a69bd441dd2d 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -122,6 +122,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct radeon_bo *bo;
+ struct fence *fence;
int r;
bo = container_of(it, struct radeon_bo, mn_it);
@@ -133,8 +134,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
- if (bo->tbo.sync_obj) {
- r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ fence = reservation_object_get_excl(bo->tbo.resv);
+ if (fence) {
+ r = radeon_fence_wait((struct radeon_fence *)fence, false);
if (r)
DRM_ERROR("(%d) failed to wait for user bo\n", r);
}
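The MMU-notifier path only needs to wait, and the same fetch-check-wait shape reappears in the UVD hunk further down. Since the fence is now a generic struct fence, the cast back to radeon_fence is optional; an equivalent driver-agnostic wait could look like this (a sketch, not what the patch adds):

#include <linux/fence.h>
#include <linux/reservation.h>

/* Wait for the exclusive fence of a reserved BO; 0 if none is pending. */
static int wait_excl(struct reservation_object *resv)
{
	struct fence *f = reservation_object_get_excl(resv);
	long r;

	if (!f)
		return 0;

	r = fence_wait(f, false);	/* uninterruptible, like the hunk */
	return r < 0 ? r : 0;
}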
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 378fe9ea4d44..aadbd36e64b9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -781,8 +781,8 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
return r;
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
- if (bo->tbo.sync_obj)
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
ttm_bo_unreserve(&bo->tbo);
return r;
}
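The dropped NULL check is intentional: after the TTM side of this series, ttm_bo_wait() consults the reservation object itself and returns 0 when nothing is attached, so calling it unconditionally is correct. Roughly (a simplification; the real function also handles shared fences):

static int ttm_bo_wait_sketch(struct ttm_buffer_object *bo,
			      bool interruptible, bool no_wait)
{
	struct fence *excl = reservation_object_get_excl(bo->resv);
	long r;

	if (!excl || fence_is_signaled(excl))
		return 0;
	if (no_wait)
		return -EBUSY;

	r = fence_wait(excl, interruptible);
	return r < 0 ? r : 0;
}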
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 822eb3630045..62d1f4d730a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -270,12 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- fence = bo->sync_obj;
+ fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+ r = ttm_bo_move_accel_cleanup(bo, &fence->base,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
@@ -488,31 +488,6 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
-{
- return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
-}
-
-static int radeon_sync_obj_flush(void *sync_obj)
-{
- return 0;
-}
-
-static void radeon_sync_obj_unref(void **sync_obj)
-{
- radeon_fence_unref((struct radeon_fence **)sync_obj);
-}
-
-static void *radeon_sync_obj_ref(void *sync_obj)
-{
- return radeon_fence_ref((struct radeon_fence *)sync_obj);
-}
-
-static bool radeon_sync_obj_signaled(void *sync_obj)
-{
- return radeon_fence_signaled((struct radeon_fence *)sync_obj);
-}
-
/*
* TTM backend functions.
*/
@@ -847,11 +822,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
- .sync_obj_signaled = &radeon_sync_obj_signaled,
- .sync_obj_wait = &radeon_sync_obj_wait,
- .sync_obj_flush = &radeon_sync_obj_flush,
- .sync_obj_unref = &radeon_sync_obj_unref,
- .sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
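The deleted callbacks were thin shims that let TTM manipulate an opaque void * fence; with a common struct fence, TTM calls the generic API directly and the vtable entries become dead weight. The mapping, as assumed from linux/fence.h of this era:

#include <linux/fence.h>

static void sync_obj_to_fence_api(struct fence *f)
{
	fence_get(f);			/* was ->sync_obj_ref()      */
	if (!fence_is_signaled(f))	/* was ->sync_obj_signaled() */
		fence_wait(f, false);	/* was ->sync_obj_wait()     */
	fence_put(f);			/* was ->sync_obj_unref()    */
	/* ->sync_obj_flush() needs no replacement; radeon's was a no-op */
}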
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 5729e9bebd9d..ba4f38916026 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -400,6 +400,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
+ struct fence *f;
void *ptr;
int i, r;
@@ -409,8 +410,9 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- if (bo->tbo.sync_obj) {
- r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ f = reservation_object_get_excl(bo->tbo.resv);
+ if (f) {
+ r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
return r;
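Here the wait protects CPU access: the function is about to kmap the buffer and parse the UVD message in place, so any GPU writer tracked by the exclusive fence has to retire first. The guarded access, abbreviated from the surrounding function:

	f = reservation_object_get_excl(bo->tbo.resv);
	if (f) {
		r = radeon_fence_wait((struct radeon_fence *)f, false);
		if (r)
			return r;	/* the GPU may still be writing */
	}

	r = radeon_bo_kmap(bo, &ptr);	/* safe to read the message now */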
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 3d9a6a036f8a..671ee566aa51 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -424,7 +424,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
if (r)
goto error;
- ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+ ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
radeon_ib_free(rdev, &ib);
return 0;
@@ -693,8 +693,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
+ struct fence *fence;
+
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
+
+ fence = reservation_object_get_excl(pd->tbo.resv);
+ radeon_semaphore_sync_to(ib.semaphore,
+ (struct radeon_fence *)fence);
+
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
@@ -820,8 +826,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
+ struct fence *fence;
- radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+ fence = reservation_object_get_excl(pt->tbo.resv);
+ radeon_semaphore_sync_to(ib->semaphore,
+ (struct radeon_fence *)fence);
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
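Both VM hunks repeat the same fetch-then-sync pair so that the ring doing the page-table update waits for whoever last touched the directory or table. A small helper could fold the pattern (hypothetical, not part of the patch; radeon_semaphore_sync_to() already tolerates a NULL fence):

static void sync_to_excl(struct radeon_semaphore *semaphore,
			 struct radeon_bo *bo)
{
	struct fence *fence = reservation_object_get_excl(bo->tbo.resv);

	/* same embedded-base downcast as the rest of the patch */
	radeon_semaphore_sync_to(semaphore, (struct radeon_fence *)fence);
}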