author     Dave Airlie <airlied@redhat.com>   2020-03-12 05:42:41 +0300
committer  Dave Airlie <airlied@redhat.com>   2020-03-12 05:42:56 +0300
commit     9e12da086e5e38f7b4f8d6e02a82447f4165fbee (patch)
tree       96e6029378ccb9ae3f9530d019c8216a3ca4a556 /drivers/gpu/drm/amd/amdgpu
parent     d3bd37f587b4438d47751d0f1d5aaae3d39bd416 (diff)
parent     bc1a4130fc0309cc2f43b9cc616ebbc295e886ff (diff)
download   linux-9e12da086e5e38f7b4f8d6e02a82447f4165fbee.tar.xz
Merge tag 'drm-misc-next-2020-03-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.7:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
Driver Changes:
- fb-helper: Remove drm_fb_helper_{add,add_all,remove}_one_connector
- fbdev: some cleanups and dead-code removal
- Conversions to simple-encoder
- zero-length array removal
- Panel: panel-dpi support in panel-simple, Novatek NT35510, Elida KD35T133,
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20200309135439.dicfnbo4ikj4tkz7@gilmour
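Of the driver changes listed above, "Conversions to simple-encoder" refers to drivers switching to the drm_simple_encoder_init() helper added to drm_simple_kms_helper for 5.7: encoders that need no callbacks beyond cleanup can drop their hand-rolled drm_encoder_funcs. The before/after sketch below is illustrative only; the my_driver_* names and the DRM_MODE_ENCODER_NONE type are assumptions, not code from any driver in this merge.

#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

/* Before: an encoder-funcs table that only wires up .destroy. */
static const struct drm_encoder_funcs my_driver_encoder_funcs = {
        .destroy = drm_encoder_cleanup,
};

static int my_driver_encoder_init_old(struct drm_device *dev,
                                      struct drm_encoder *encoder)
{
        return drm_encoder_init(dev, encoder, &my_driver_encoder_funcs,
                                DRM_MODE_ENCODER_NONE, NULL);
}

/* After: the simple-encoder helper supplies the boilerplate funcs. */
static int my_driver_encoder_init_new(struct drm_device *dev,
                                      struct drm_encoder *encoder)
{
        return drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
}

The conversions in this cycle follow this pattern; the helper's funcs wire .destroy to drm_encoder_cleanup(), so behaviour is unchanged.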
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |   5
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   | 124
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c        |   5
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  11
4 files changed, 131 insertions, 14 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f397ff97b4e4..438d10ae343b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
 #include <linux/file.h>
 #include <linux/pagemap.h>
 #include <linux/sync_file.h>
+#include <linux/dma-buf.h>

 #include <drm/amdgpu_drm.h>
 #include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
         /* Don't move this buffer if we have depleted our allowance
          * to move it. Don't move anything if the threshold is zero.
          */
-        if (p->bytes_moved < p->bytes_moved_threshold) {
+        if (p->bytes_moved < p->bytes_moved_threshold &&
+            (!bo->tbo.base.dma_buf ||
+             list_empty(&bo->tbo.base.dma_buf->attachments))) {
                 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                     (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                         /* And don't move a CPU_ACCESS_REQUIRED BO to limited
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a59cd47aa6c1..ffeb20f11c07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 }

 /**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+        struct drm_gem_object *obj = attach->dmabuf->priv;
+        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+        /* pin buffer into GTT */
+        return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+        struct drm_gem_object *obj = attach->dmabuf->priv;
+        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+        amdgpu_bo_unpin(bo);
+}
+
+/**
  * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
  * @attach: DMA-buf attachment
  * @dir: DMA direction
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
         struct sg_table *sgt;
         long r;

-        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
-        if (r)
-                return ERR_PTR(r);
+        if (!bo->pin_count) {
+                /* move buffer into GTT */
+                struct ttm_operation_ctx ctx = { false, false };
+
+                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+                if (r)
+                        return ERR_PTR(r);
+
+        } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+                     AMDGPU_GEM_DOMAIN_GTT)) {
+                return ERR_PTR(-EBUSY);
+        }

         sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
         if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
 {
-        struct drm_gem_object *obj = attach->dmabuf->priv;
-        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
         dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
         sg_free_table(sgt);
         kfree(sgt);
-        amdgpu_bo_unpin(bo);
 }

 /**
@@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 }

 const struct dma_buf_ops amdgpu_dmabuf_ops = {
-        .dynamic_mapping = true,
         .attach = amdgpu_dma_buf_attach,
         .detach = amdgpu_dma_buf_detach,
+        .pin = amdgpu_dma_buf_pin,
+        .unpin = amdgpu_dma_buf_unpin,
         .map_dma_buf = amdgpu_dma_buf_map,
         .unmap_dma_buf = amdgpu_dma_buf_unmap,
         .release = drm_gem_dmabuf_release,
@@ -413,6 +451,73 @@ error:
 }

 /**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that the we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+        struct drm_gem_object *obj = attach->importer_priv;
+        struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+        struct ttm_operation_ctx ctx = { false, false };
+        struct ttm_placement placement = {};
+        struct amdgpu_vm_bo_base *bo_base;
+        int r;
+
+        if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+                return;
+
+        r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+        if (r) {
+                DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
+                return;
+        }
+
+        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+                struct amdgpu_vm *vm = bo_base->vm;
+                struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+                if (ticket) {
+                        /* When we get an error here it means that somebody
+                         * else is holding the VM lock and updating page tables
+                         * So we can just continue here.
+                         */
+                        r = dma_resv_lock(resv, ticket);
+                        if (r)
+                                continue;
+
+                } else {
+                        /* TODO: This is more problematic and we actually need
+                         * to allow page tables updates without holding the
+                         * lock.
+                         */
+                        if (!dma_resv_trylock(resv))
+                                continue;
+                }
+
+                r = amdgpu_vm_clear_freed(adev, vm, NULL);
+                if (!r)
+                        r = amdgpu_vm_handle_moved(adev, vm);
+
+                if (r && r != -EBUSY)
+                        DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+                                  r);
+
+                dma_resv_unlock(resv);
+        }
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+        .move_notify = amdgpu_dma_buf_move_notify
+};
+
+/**
  * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
  * @dev: DRM device
  * @dma_buf: Shared DMA buffer
@@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
         if (IS_ERR(obj))
                 return obj;

-        attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+        attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+                                        &amdgpu_dma_buf_attach_ops, obj);
         if (IS_ERR(attach)) {
                 drm_gem_object_put(obj);
                 return ERR_CAST(attach);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 2672dc64a310..9ae7b61f696a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -336,15 +336,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
         drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
                               &amdgpu_fb_helper_funcs);

-        ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
-                                 AMDGPUFB_CONN_LIMIT);
+        ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
         if (ret) {
                 kfree(rfbdev);
                 return ret;
         }

-        drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
         /* disable all the possible outputs/crtcs before entering KMS mode */
         if (!amdgpu_device_has_dc_support(adev))
                 drm_helper_disable_unused_functions(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e4a8c424d290..1791c084787d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/dma-buf.h>

 #include <drm/amdgpu_drm.h>
 #include <drm/drm_cache.h>
@@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                 return 0;
         }

+        if (bo->tbo.base.import_attach)
+                dma_buf_pin(bo->tbo.base.import_attach);
+
         bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
         /* force to pin into visible video ram */
         if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)

         amdgpu_bo_subtract_pin_size(bo);

+        if (bo->tbo.base.import_attach)
+                dma_buf_unpin(bo->tbo.base.import_attach);
+
         for (i = 0; i < bo->placement.num_placement; i++) {
                 bo->placements[i].lpfn = 0;
                 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
@@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,

         amdgpu_bo_kunmap(abo);

+        if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+            bo->mem.mem_type != TTM_PL_SYSTEM)
+                dma_buf_move_notify(abo->tbo.base.dma_buf);
+
         /* remember the eviction */
         if (evict)
                 atomic64_inc(&adev->num_evictions);
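Taken together, the amdgpu hunks above implement both halves of the dynamic DMA-buf protocol introduced for 5.7: as an exporter, amdgpu now provides .pin/.unpin in amdgpu_dmabuf_ops and calls dma_buf_move_notify() from amdgpu_bo_move_notify() when a shared BO is about to move; as an importer, it registers amdgpu_dma_buf_attach_ops with a move_notify callback through dma_buf_dynamic_attach(), so mappings are invalidated instead of pinning the buffer for the lifetime of the attachment. For orientation, here is a minimal, generic importer sketch against the same 5.7 dma-buf API. It is not code from this commit: struct my_importer and the my_importer_* functions are assumptions for illustration, and a real importer would also install fences on the reservation object and re-create the mapping on demand after move_notify.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

/* Hypothetical importer-private state; illustrative only. */
struct my_importer {
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
};

/*
 * Called by the exporter (via dma_buf_move_notify()) with the buffer's
 * dma_resv lock already held: drop the cached mapping so that it is
 * re-created before the next use.
 */
static void my_importer_move_notify(struct dma_buf_attachment *attach)
{
        struct my_importer *imp = attach->importer_priv;

        if (imp->sgt) {
                dma_buf_unmap_attachment(attach, imp->sgt, DMA_BIDIRECTIONAL);
                imp->sgt = NULL;
        }
}

static const struct dma_buf_attach_ops my_importer_attach_ops = {
        .move_notify = my_importer_move_notify,
};

/*
 * Attach dynamically and create an initial mapping.  Because a
 * move_notify callback is registered, the attachment is dynamic and
 * map/unmap must be called with the buffer's reservation lock held.
 */
static int my_importer_map(struct my_importer *imp, struct dma_buf *buf,
                           struct device *dev)
{
        struct sg_table *sgt;

        imp->attach = dma_buf_dynamic_attach(buf, dev,
                                             &my_importer_attach_ops, imp);
        if (IS_ERR(imp->attach))
                return PTR_ERR(imp->attach);

        dma_resv_lock(buf->resv, NULL);
        sgt = dma_buf_map_attachment(imp->attach, DMA_BIDIRECTIONAL);
        dma_resv_unlock(buf->resv);

        if (IS_ERR(sgt)) {
                dma_buf_detach(buf, imp->attach);
                return PTR_ERR(sgt);
        }

        imp->sgt = sgt;
        return 0;
}

The same locking contract explains the changes to amdgpu_dma_buf_map()/amdgpu_dma_buf_unmap() in the diff: with pin/unpin and move_notify in place, the exporter no longer pins in map and unpins in unmap; it only validates the BO into GTT while the caller holds the reservation lock.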