Diffstat (limited to 'drivers/accel/amdxdna/amdxdna_gem.c')
-rw-r--r--	drivers/accel/amdxdna/amdxdna_gem.c	332
1 file changed, 218 insertions(+), 114 deletions(-)
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 26831ec69f89..d407a36eb412 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -18,46 +18,86 @@
 #include "amdxdna_ctx.h"
 #include "amdxdna_gem.h"
 #include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
 
 #define XDNA_MAX_CMD_BO_SIZE SZ_32K
 
 MODULE_IMPORT_NS("DMA_BUF");
 
 static int
-amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
 {
 	struct amdxdna_client *client = abo->client;
 	struct amdxdna_dev *xdna = client->xdna;
 	struct amdxdna_mem *mem = &abo->mem;
+	struct amdxdna_gem_obj *heap;
 	u64 offset;
 	u32 align;
 	int ret;
 
+	mutex_lock(&client->mm_lock);
+
+	heap = client->dev_heap;
+	if (!heap) {
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+		XDNA_ERR(xdna, "Invalid dev heap userptr");
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	if (mem->size == 0 || mem->size > heap->mem.size) {
+		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+			 mem->size, heap->mem.size);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
 	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
-	ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
 					 mem->size, align, 0,
 					 DRM_MM_INSERT_BEST);
 	if (ret) {
 		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
-		return ret;
+		goto unlock_out;
 	}
 
 	mem->dev_addr = abo->mm_node.start;
-	offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
-	mem->userptr = abo->dev_heap->mem.userptr + offset;
-	mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
-	mem->nr_pages = mem->size >> PAGE_SHIFT;
-
-	if (use_vmap) {
-		mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
-		if (!mem->kva) {
-			XDNA_ERR(xdna, "Failed to vmap");
-			drm_mm_remove_node(&abo->mm_node);
-			return -EFAULT;
-		}
-	}
+	offset = mem->dev_addr - heap->mem.dev_addr;
+	mem->userptr = heap->mem.userptr + offset;
+	mem->kva = heap->mem.kva + offset;
 
-	return 0;
+	drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+	mutex_unlock(&client->mm_lock);
+
+	return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+	mutex_destroy(&abo->lock);
+	kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+	struct amdxdna_gem_obj *heap;
+
+	mutex_lock(&abo->client->mm_lock);
+
+	drm_mm_remove_node(&abo->mm_node);
+
+	heap = abo->client->dev_heap;
+	drm_gem_object_put(to_gobj(heap));
+
+	mutex_unlock(&abo->client->mm_lock);
 }
 
 static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
@@ -213,6 +253,20 @@ free_map:
 	return ret;
 }
 
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+	if (abo->pinned)
+		amdxdna_gem_unpin(abo);
+
+	amdxdna_gem_heap_free(abo);
+	drm_gem_object_release(gobj);
+	amdxdna_gem_destroy_obj(abo);
+}
+
 static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
 				struct vm_area_struct *vma)
 {
@@ -243,7 +297,7 @@ static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
 	vma->vm_private_data = NULL;
 	vma->vm_ops = NULL;
 
-	ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+	ret = dma_buf_mmap(abo->dma_buf, vma, 0);
 	if (ret) {
 		XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
 		return ret;
@@ -338,10 +392,47 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
 	.vunmap = drm_gem_dmabuf_vunmap,
 };
 
+static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+	iosys_map_clear(map);
+
+	dma_resv_assert_held(obj->resv);
+
+	if (is_import_bo(abo))
+		dma_buf_vmap(abo->dma_buf, map);
+	else
+		drm_gem_shmem_object_vmap(obj, map);
+
+	if (!map->vaddr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+	dma_resv_assert_held(obj->resv);
+
+	if (is_import_bo(abo))
+		dma_buf_vunmap(abo->dma_buf, map);
+	else
+		drm_gem_shmem_object_vunmap(obj, map);
+}
+
 static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
 {
+	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
+	if (abo->dma_buf) {
+		get_dma_buf(abo->dma_buf);
+		return abo->dma_buf;
+	}
+
 	exp_info.ops = &amdxdna_dmabuf_ops;
 	exp_info.size = gobj->size;
 	exp_info.flags = flags;
@@ -374,19 +465,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 	if (abo->pinned)
 		amdxdna_gem_unpin(abo);
 
-	if (abo->type == AMDXDNA_BO_DEV) {
-		mutex_lock(&abo->client->mm_lock);
-		drm_mm_remove_node(&abo->mm_node);
-		mutex_unlock(&abo->client->mm_lock);
-
-		vunmap(abo->mem.kva);
-		drm_gem_object_put(to_gobj(abo->dev_heap));
-		drm_gem_object_release(gobj);
-		mutex_destroy(&abo->lock);
-		kfree(abo);
-		return;
-	}
-
 	if (abo->type == AMDXDNA_BO_DEV_HEAP)
 		drm_mm_takedown(&abo->mm);
 
@@ -402,7 +480,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 }
 
 static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
-	.free = amdxdna_gem_obj_free,
+	.free = amdxdna_gem_dev_obj_free,
 };
 
 static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -411,8 +489,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
 	.pin = drm_gem_shmem_object_pin,
 	.unpin = drm_gem_shmem_object_unpin,
 	.get_sg_table = drm_gem_shmem_object_get_sg_table,
-	.vmap = drm_gem_shmem_object_vmap,
-	.vunmap = drm_gem_shmem_object_vunmap,
+	.vmap = amdxdna_gem_obj_vmap,
+	.vunmap = amdxdna_gem_obj_vunmap,
 	.mmap = amdxdna_gem_obj_mmap,
 	.vm_ops = &drm_gem_shmem_vm_ops,
 	.export = amdxdna_gem_prime_export,
@@ -454,6 +532,68 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
 	return to_gobj(abo);
 }
 
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+	if (IS_ERR(shmem))
+		return ERR_CAST(shmem);
+
+	shmem->map_wc = false;
+	return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(dev);
+	enum amdxdna_ubuf_flag flags = 0;
+	struct amdxdna_drm_va_tbl va_tbl;
+	struct drm_gem_object *gobj;
+	struct dma_buf *dma_buf;
+
+	if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+		XDNA_DBG(xdna, "Access va table failed");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (va_tbl.num_entries) {
+		if (args->type == AMDXDNA_BO_CMD)
+			flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+		dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+					   u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+	} else {
+		dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+	}
+
+	if (IS_ERR(dma_buf))
+		return ERR_CAST(dma_buf);
+
+	gobj = amdxdna_gem_prime_import(dev, dma_buf);
+	if (IS_ERR(gobj)) {
+		dma_buf_put(dma_buf);
+		return ERR_CAST(gobj);
+	}
+
+	dma_buf_put(dma_buf);
+
+	return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+			  struct amdxdna_drm_create_bo *args)
+{
+	size_t aligned_sz = PAGE_ALIGN(args->size);
+
+	if (args->vaddr)
+		return amdxdna_gem_create_ubuf_object(dev, args);
+
+	return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
 struct drm_gem_object *
 amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 {
@@ -505,16 +645,12 @@ amdxdna_drm_alloc_shmem(struct drm_device *dev,
 			struct drm_file *filp)
 {
 	struct amdxdna_client *client = filp->driver_priv;
-	struct drm_gem_shmem_object *shmem;
 	struct amdxdna_gem_obj *abo;
 
-	shmem = drm_gem_shmem_create(dev, args->size);
-	if (IS_ERR(shmem))
-		return ERR_CAST(shmem);
-
-	shmem->map_wc = false;
+	abo = amdxdna_gem_create_object(dev, args);
+	if (IS_ERR(abo))
+		return ERR_CAST(abo);
 
-	abo = to_xdna_obj(&shmem->base);
 	abo->client = client;
 	abo->type = AMDXDNA_BO_SHMEM;
 
@@ -527,8 +663,8 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 			    struct drm_file *filp)
 {
 	struct amdxdna_client *client = filp->driver_priv;
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
-	struct drm_gem_shmem_object *shmem;
 	struct amdxdna_gem_obj *abo;
 	int ret;
 
@@ -545,26 +681,32 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 		goto mm_unlock;
 	}
 
-	shmem = drm_gem_shmem_create(dev, args->size);
-	if (IS_ERR(shmem)) {
-		ret = PTR_ERR(shmem);
+	abo = amdxdna_gem_create_object(dev, args);
+	if (IS_ERR(abo)) {
+		ret = PTR_ERR(abo);
 		goto mm_unlock;
 	}
 
-	shmem->map_wc = false;
-	abo = to_xdna_obj(&shmem->base);
-
 	abo->type = AMDXDNA_BO_DEV_HEAP;
 	abo->client = client;
 	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
 	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
 
+	ret = drm_gem_vmap(to_gobj(abo), &map);
+	if (ret) {
+		XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+		goto release_obj;
+	}
+	abo->mem.kva = map.vaddr;
+
 	client->dev_heap = abo;
 	drm_gem_object_get(to_gobj(abo));
 	mutex_unlock(&client->mm_lock);
 
 	return abo;
 
+release_obj:
+	drm_gem_object_put(to_gobj(abo));
 mm_unlock:
 	mutex_unlock(&client->mm_lock);
 	return ERR_PTR(ret);
@@ -573,58 +715,32 @@ mm_unlock:
 struct amdxdna_gem_obj *
 amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
 			 struct amdxdna_drm_create_bo *args,
-			 struct drm_file *filp, bool use_vmap)
+			 struct drm_file *filp)
 {
 	struct amdxdna_client *client = filp->driver_priv;
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
 	size_t aligned_sz = PAGE_ALIGN(args->size);
-	struct amdxdna_gem_obj *abo, *heap;
+	struct amdxdna_gem_obj *abo;
 	int ret;
 
-	mutex_lock(&client->mm_lock);
-	heap = client->dev_heap;
-	if (!heap) {
-		ret = -EINVAL;
-		goto mm_unlock;
-	}
-
-	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
-		XDNA_ERR(xdna, "Invalid dev heap userptr");
-		ret = -EINVAL;
-		goto mm_unlock;
-	}
-
-	if (args->size > heap->mem.size) {
-		XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
-			 args->size, heap->mem.size);
-		ret = -EINVAL;
-		goto mm_unlock;
-	}
-
 	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
-	if (IS_ERR(abo)) {
-		ret = PTR_ERR(abo);
-		goto mm_unlock;
-	}
+	if (IS_ERR(abo))
+		return abo;
+
 	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
 	abo->type = AMDXDNA_BO_DEV;
 	abo->client = client;
-	abo->dev_heap = heap;
-	ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+
+	ret = amdxdna_gem_heap_alloc(abo);
 	if (ret) {
 		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
-		goto mm_unlock;
+		amdxdna_gem_destroy_obj(abo);
+		return ERR_PTR(ret);
 	}
 
-	drm_gem_object_get(to_gobj(heap));
 	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
 
-	mutex_unlock(&client->mm_lock);
 	return abo;
-
-mm_unlock:
-	mutex_unlock(&client->mm_lock);
-	return ERR_PTR(ret);
 }
 
 static struct amdxdna_gem_obj *
@@ -632,10 +748,9 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
 			  struct amdxdna_drm_create_bo *args,
 			  struct drm_file *filp)
 {
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
-	struct drm_gem_shmem_object *shmem;
 	struct amdxdna_gem_obj *abo;
-	struct iosys_map map;
 	int ret;
 
 	if (args->size > XDNA_MAX_CMD_BO_SIZE) {
@@ -648,12 +763,9 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	shmem = drm_gem_shmem_create(dev, args->size);
-	if (IS_ERR(shmem))
-		return ERR_CAST(shmem);
-
-	shmem->map_wc = false;
-	abo = to_xdna_obj(&shmem->base);
+	abo = amdxdna_gem_create_object(dev, args);
+	if (IS_ERR(abo))
+		return ERR_CAST(abo);
 
 	abo->type = AMDXDNA_BO_CMD;
 	abo->client = filp->driver_priv;
@@ -668,7 +780,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
 	return abo;
 
 release_obj:
-	drm_gem_shmem_free(shmem);
+	drm_gem_object_put(to_gobj(abo));
 	return ERR_PTR(ret);
 }
 
@@ -679,7 +791,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
 	struct amdxdna_gem_obj *abo;
 	int ret;
 
-	if (args->flags || args->vaddr || !args->size)
+	if (args->flags)
 		return -EINVAL;
 
 	XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
@@ -692,7 +804,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
 		abo = amdxdna_drm_create_dev_heap(dev, args, filp);
 		break;
 	case AMDXDNA_BO_DEV:
-		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
 		break;
 	case AMDXDNA_BO_CMD:
 		abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
@@ -724,20 +836,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
 	int ret;
 
+	if (abo->type == AMDXDNA_BO_DEV)
+		abo = abo->client->dev_heap;
+
 	if (is_import_bo(abo))
 		return 0;
 
-	switch (abo->type) {
-	case AMDXDNA_BO_SHMEM:
-	case AMDXDNA_BO_DEV_HEAP:
-		ret = drm_gem_shmem_pin(&abo->base);
-		break;
-	case AMDXDNA_BO_DEV:
-		ret = drm_gem_shmem_pin(&abo->dev_heap->base);
-		break;
-	default:
-		ret = -EOPNOTSUPP;
-	}
+	ret = drm_gem_shmem_pin(&abo->base);
 
 	XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
 	return ret;
@@ -747,9 +852,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
 {
 	int ret;
 
-	if (abo->type == AMDXDNA_BO_DEV)
-		abo = abo->dev_heap;
-
 	mutex_lock(&abo->lock);
 	ret = amdxdna_gem_pin_nolock(abo);
 	mutex_unlock(&abo->lock);
@@ -759,12 +861,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
 
 void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
 {
+	if (abo->type == AMDXDNA_BO_DEV)
+		abo = abo->client->dev_heap;
+
 	if (is_import_bo(abo))
 		return;
 
-	if (abo->type == AMDXDNA_BO_DEV)
-		abo = abo->dev_heap;
-
 	mutex_lock(&abo->lock);
 	drm_gem_shmem_unpin(&abo->base);
 	mutex_unlock(&abo->lock);
@@ -855,10 +957,12 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
 
 	if (is_import_bo(abo))
 		drm_clflush_sg(abo->base.sgt);
-	else if (abo->type == AMDXDNA_BO_DEV)
-		drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
-	else
+	else if (abo->mem.kva)
+		drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+	else if (abo->base.pages)
 		drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+	else
+		drm_WARN(&xdna->ddev, 1, "Can not get flush memory");
 
 	amdxdna_gem_unpin(abo);
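
Note on the AMDXDNA_BO_DEV rework above: a dev BO no longer vmaps its own page range. amdxdna_gem_heap_alloc() carves a drm_mm node out of the client's heap BO and derives all three views (device address, userptr, kernel VA) from one offset into the heap's existing mappings, taking a reference on the heap so it outlives its carve-outs. A minimal userspace model of that offset arithmetic, with a bump allocator standing in for drm_mm (all names here are illustrative, not driver code):

/* heap_carveout.c - illustrative model of the carve-out arithmetic only. */
#include <stdint.h>
#include <stdio.h>

struct heap {
	uint64_t dev_addr;   /* device-visible base of the heap */
	uint64_t userptr;    /* user mapping of the same memory */
	uint8_t *kva;        /* kernel mapping of the same memory */
	uint64_t size, used;
};

struct dev_bo {
	uint64_t dev_addr, userptr;
	uint8_t *kva;
};

/* Bump allocator standing in for drm_mm_insert_node_generic(). */
static int heap_alloc(struct heap *h, uint64_t sz, struct dev_bo *bo)
{
	uint64_t offset;

	if (sz == 0 || sz > h->size - h->used)
		return -1;
	offset = h->used;
	h->used += sz;

	/* Same derivation as the patch: one offset serves all views,
	 * offset = mem->dev_addr - heap->mem.dev_addr. */
	bo->dev_addr = h->dev_addr + offset;
	bo->userptr = h->userptr + offset;
	bo->kva = h->kva + offset;
	return 0;
}

int main(void)
{
	static uint8_t backing[1 << 20];
	struct heap h = { 0x80000000, 0x7f0000000000, backing, sizeof(backing), 0 };
	struct dev_bo bo;

	if (!heap_alloc(&h, 0x1000, &bo))
		printf("dev 0x%llx user 0x%llx\n",
		       (unsigned long long)bo.dev_addr,
		       (unsigned long long)bo.userptr);
	return 0;
}

This also explains why sync_bo can now flush dev BOs with drm_clflush_virt_range(): the heap keeps a persistent kva, so a dev BO's kernel VA is always heap kva plus offset.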
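
The other new path, amdxdna_gem_create_ubuf_object(), lets args->vaddr point at a user table: a header followed by num_entries address ranges handed to amdxdna_get_ubuf(), or a dma-buf fd when num_entries is zero. A hedged sketch of how a caller might pack such a table follows; the header fields mirror what the diff dereferences (num_entries, dmabuf_fd, entries at vaddr + sizeof(va_tbl)), but the exact field order, padding, and per-entry vaddr/len shape are assumptions, not the real amdxdna uapi definitions:

/* va_tbl_pack.c - hypothetical layout reconstructed from the fields
 * this diff reads; the real definitions live in the amdxdna uapi header. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct amdxdna_drm_va_tbl {     /* header read from args->vaddr */
	int64_t dmabuf_fd;      /* used when num_entries == 0 */
	uint32_t num_entries;   /* VA entries follow the header */
	uint32_t pad;           /* assumed padding */
};

struct amdxdna_drm_va_entry {   /* assumed per-entry shape */
	uint64_t vaddr;
	uint64_t len;
};

/* Pack header + entries contiguously, matching what the kernel side
 * expects: entries begin at args->vaddr + sizeof(struct amdxdna_drm_va_tbl). */
void *pack_va_tbl(const struct amdxdna_drm_va_entry *ents,
		  uint32_t n, size_t *out_sz)
{
	size_t sz = sizeof(struct amdxdna_drm_va_tbl) + (size_t)n * sizeof(*ents);
	struct amdxdna_drm_va_tbl *tbl = calloc(1, sz);

	if (!tbl)
		return NULL;
	tbl->dmabuf_fd = -1;    /* unused on the VA-table path */
	tbl->num_entries = n;
	memcpy(tbl + 1, ents, (size_t)n * sizeof(*ents));
	*out_sz = sz;
	return tbl;
}

Either way the result is a dma-buf that the driver immediately self-imports through amdxdna_gem_prime_import(), which is why amdxdna_gem_prime_export() now returns the cached abo->dma_buf (with an extra get_dma_buf() reference) instead of wrapping the object a second time.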