Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c | 134
1 file changed, 54 insertions(+), 80 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a3e7aed7e680..c830863bc98a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
-	unsigned i, j;
-	bool duplicate, need_mmap_lock = false;
+	unsigned i;
+	bool need_mmap_lock = false;
 	int r;
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		return 0;
 	}
-	chunk = &p->chunks[p->chunk_relocs_idx];
+	chunk = p->chunk_relocs;
 	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each relocs use 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
-	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
-	if (p->relocs_ptr == NULL) {
-		return -ENOMEM;
-	}
-	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
 	for (i = 0; i < p->nrelocs; i++) {
 		struct drm_radeon_cs_reloc *r;
+		struct drm_gem_object *gobj;
 		unsigned priority;
 
-		duplicate = false;
 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-		for (j = 0; j < i; j++) {
-			if (r->handle == p->relocs[j].handle) {
-				p->relocs_ptr[i] = &p->relocs[j];
-				duplicate = true;
-				break;
-			}
-		}
-		if (duplicate) {
-			p->relocs[i].handle = 0;
-			continue;
-		}
-
-		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
-							  r->handle);
-		if (p->relocs[i].gobj == NULL) {
+		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+		if (gobj == NULL) {
 			DRM_ERROR("gem object lookup failed 0x%x\n",
 				  r->handle);
 			return -ENOENT;
 		}
-		p->relocs_ptr[i] = &p->relocs[i];
-		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+		p->relocs[i].robj = gem_to_radeon_bo(gobj);
 
 		/* The userspace buffer priorities are from 0 to 15. A higher
 		 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].tv.shared = !r->write_domain;
-		p->relocs[i].handle = r->handle;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
 				      priority);
@@ -251,22 +232,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i, r = 0;
+	struct radeon_bo_list *reloc;
+	int r;
 
-	for (i = 0; i < p->nrelocs; i++) {
+	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;
 
-		if (!p->relocs[i].robj)
-			continue;
-
-		resv = p->relocs[i].robj->tbo.resv;
-		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
-					       p->relocs[i].tv.shared);
-
+		resv = reloc->robj->tbo.resv;
+		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+				     reloc->tv.shared);
 		if (r)
-			break;
+			return r;
 	}
-	return r;
+	return 0;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -285,13 +263,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
 	p->ib.sa_bo = NULL;
-	p->ib.semaphore = NULL;
 	p->const_ib.sa_bo = NULL;
-	p->const_ib.semaphore = NULL;
-	p->chunk_ib_idx = -1;
-	p->chunk_relocs_idx = -1;
-	p->chunk_flags_idx = -1;
-	p->chunk_const_ib_idx = -1;
+	p->chunk_ib = NULL;
+	p->chunk_relocs = NULL;
+	p->chunk_flags = NULL;
+	p->chunk_const_ib = NULL;
 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (p->chunks_array == NULL) {
 		return -ENOMEM;
 	}
@@ -318,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
-			p->chunk_relocs_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs = &p->chunks[i];
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
-			p->chunk_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib = &p->chunks[i];
 			/* zero length IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
-			p->chunk_const_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib = &p->chunks[i];
 			/* zero length CONST IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-			p->chunk_flags_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags = &p->chunks[i];
 			/* zero length flags aren't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
@@ -344,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
 			continue;
 
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
 				continue;
 		}
@@ -360,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 			return -EFAULT;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
 			p->cs_flags = p->chunks[i].kdata[0];
 			if (p->chunks[i].length_dw > 1)
 				ring = p->chunks[i].kdata[1];
@@ -401,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 static int cmp_size_smaller_first(void *priv, struct list_head *a,
 				  struct list_head *b)
 {
-	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
-	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -443,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
-			if (parser->relocs[i].gobj)
-				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+			struct radeon_bo *bo = parser->relocs[i].robj;
+			if (bo == NULL)
+				continue;
+
+			drm_gem_object_unreference_unlocked(&bo->gem_base);
 		}
 	}
 	kfree(parser->track);
 	kfree(parser->relocs);
-	kfree(parser->relocs_ptr);
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
@@ -464,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 {
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 
 	if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -524,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	for (i = 0; i < p->nrelocs; i++) {
 		struct radeon_bo *bo;
 
-		/* ignore duplicates */
-		if (p->relocs_ptr[i] != &p->relocs[i])
-			continue;
-
 		bo = p->relocs[i].robj;
 		bo_va = radeon_vm_bo_find(vm, bo);
 		if (bo_va == NULL) {
@@ -538,6 +511,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 		if (r)
 			return r;
+
+		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
 	}
 
 	return radeon_vm_clear_invalids(rdev, vm);
@@ -550,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
@@ -582,10 +557,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to sync rings: %i\n", r);
 		goto out;
 	}
-	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
 
 	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
+	    (parser->chunk_const_ib != NULL)) {
 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
 	} else {
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -612,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 	struct radeon_vm *vm = NULL;
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 
 	if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -620,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 		vm = &fpriv->vm;
 
 		if ((rdev->family >= CHIP_TAHITI) &&
-		    (parser->chunk_const_ib_idx != -1)) {
-			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+		    (parser->chunk_const_ib != NULL)) {
+			ib_chunk = parser->chunk_const_ib;
 			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
 				return -EINVAL;
@@ -640,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 			return -EFAULT;
 		}
 
-		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		ib_chunk = parser->chunk_ib;
 		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
 			return -EINVAL;
 		}
 	}
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	ib_chunk = parser->chunk_ib;
 	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
 			  vm, ib_chunk->length_dw * 4);
@@ -738,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 			   struct radeon_cs_packet *pkt,
 			   unsigned idx)
 {
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;
 
@@ -832,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
  * GPU offset using the provided start.
  **/
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-				struct radeon_cs_reloc **cs_reloc,
+				struct radeon_bo_list **cs_reloc,
 				int nomm)
 {
 	struct radeon_cs_chunk *relocs_chunk;
@@ -840,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r)
 		return r;
@@ -871,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 			(u64)relocs_chunk->kdata[idx + 3] << 32;
 		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
 	} else
-		*cs_reloc = p->relocs_ptr[(idx / 4)];
+		*cs_reloc = &p->relocs[(idx / 4)];
 	return 0;
 }
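
The recurring pattern in this patch is the switch from integer chunk indices (p->chunk_ib_idx == -1 meaning "absent") to direct chunk pointers (p->chunk_ib == NULL), alongside renaming struct radeon_cs_reloc to struct radeon_bo_list and dropping the per-reloc duplicate tracking. The standalone sketch below illustrates only the index-to-pointer idea; my_chunk, my_parser, MY_CHUNK_ID_IB and find_ib_chunk are made-up illustrative names, not radeon code.

/*
 * Standalone sketch (not kernel code) of the index-to-pointer refactoring
 * used in the patch above: instead of remembering "which slot of chunks[]
 * holds the IB chunk" as an index compared against -1, the parser keeps a
 * pointer that is simply NULL when the chunk is absent.
 */
#include <stdio.h>
#include <stddef.h>

struct my_chunk {
	unsigned int chunk_id;
	unsigned int length_dw;
};

struct my_parser {
	struct my_chunk chunks[4];
	unsigned int nchunks;
	struct my_chunk *chunk_ib;	/* NULL when no IB chunk was supplied */
};

#define MY_CHUNK_ID_IB 1

/* Mirror of the parser-init loop: record a pointer instead of an index. */
static void find_ib_chunk(struct my_parser *p)
{
	unsigned int i;

	p->chunk_ib = NULL;
	for (i = 0; i < p->nchunks; i++) {
		if (p->chunks[i].chunk_id == MY_CHUNK_ID_IB)
			p->chunk_ib = &p->chunks[i];
	}
}

int main(void)
{
	struct my_parser p = {
		.chunks = { { 0, 8 }, { MY_CHUNK_ID_IB, 64 } },
		.nchunks = 2,
	};

	find_ib_chunk(&p);

	/* The old style would have been: if (p.chunk_ib_idx == -1) ... */
	if (p.chunk_ib == NULL)
		printf("no IB chunk\n");
	else
		printf("IB chunk with %u dwords\n", p.chunk_ib->length_dw);

	return 0;
}

With the pointer form, an absent chunk is a plain NULL check and each use site loses one array indirection; the patch applies the same transformation uniformly to chunk_ib, chunk_relocs, chunk_flags and chunk_const_ib.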