Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  119
1 file changed, 82 insertions, 37 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d451c359606a..6f8de11a17f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
 MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+					   uint32_t size,
+					   struct amdgpu_bo **bo_ptr)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct amdgpu_bo *bo = NULL;
+	void *addr;
+	int r;
+
+	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &bo, NULL, &addr);
+	if (r)
+		return r;
+
+	if (adev->uvd.address_64_bit)
+		goto succ;
+
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+	amdgpu_uvd_force_into_uvd_segment(bo);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (r)
+		goto err;
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+	if (r)
+		goto err_pin;
+	r = amdgpu_bo_kmap(bo, &addr);
+	if (r)
+		goto err_kmap;
+succ:
+	amdgpu_bo_unreserve(bo);
+	*bo_ptr = bo;
+	return 0;
+err_kmap:
+	amdgpu_bo_unpin(bo);
+err_pin:
+err:
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
+	return r;
+}
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
@@ -302,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
 
+	r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+	if (r)
+		return r;
+
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
 		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
@@ -324,6 +373,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
+	void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
 	int i, j;
 
 	drm_sched_entity_destroy(&adev->uvd.entity);
@@ -342,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 	}
+	amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
 	release_firmware(adev->uvd.fw);
 
 	return 0;
@@ -403,7 +454,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 		if (!adev->uvd.inst[j].saved_bo)
 			return -ENOMEM;
 
-		if (drm_dev_enter(&adev->ddev, &idx)) {
+		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 			/* re-write 0 since err_event_athub will corrupt VCPU buffer */
 			if (in_ras_intr)
 				memset(adev->uvd.inst[j].saved_bo, 0, size);
@@ -436,7 +487,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 		ptr = adev->uvd.inst[i].cpu_addr;
 
 		if (adev->uvd.inst[i].saved_bo != NULL) {
-			if (drm_dev_enter(&adev->ddev, &idx)) {
+			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 				memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
 				drm_dev_exit(idx);
 			}
@@ -449,7 +500,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-				if (drm_dev_enter(&adev->ddev, &idx)) {
+				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 					memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
 						    le32_to_cpu(hdr->ucode_size_bytes));
 					drm_dev_exit(idx);
@@ -1080,23 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	unsigned offset_idx = 0;
 	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unpin(bo);
-
-	if (!ring->adev->uvd.address_64_bit) {
-		struct ttm_operation_ctx ctx = { true, false };
-
-		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-		amdgpu_uvd_force_into_uvd_segment(bo);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-		if (r)
-			goto err;
-	}
-
 	r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
 				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
-		goto err;
+		return r;
 
 	if (adev->asic_type >= CHIP_VEGA10) {
 		offset_idx = 1 + ring->me;
@@ -1147,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		goto err_free;
 	}
 
+	amdgpu_bo_reserve(bo, true);
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -1159,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 err_free:
 	amdgpu_job_free(job);
-
-err:
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -1173,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo *bo = adev->uvd.ib_bo;
 	uint32_t *msg;
-	int r, i;
-
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &bo, NULL, (void **)&msg);
-	if (r)
-		return r;
+	int i;
 
+	msg = amdgpu_bo_kptr(bo);
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000000);
@@ -1199,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 		msg[i] = cpu_to_le32(0x0);
 
 	return amdgpu_uvd_send_msg(ring, bo, true, fence);
+
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -1209,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &bo, NULL, (void **)&msg);
-	if (r)
-		return r;
+	if (direct) {
+		bo = adev->uvd.ib_bo;
+	} else {
+		r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
+		if (r)
+			return r;
+	}
 
+	msg = amdgpu_bo_kptr(bo);
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000002);
@@ -1223,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+	r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+
+	if (!direct)
+		amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+
+	return r;
 }
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
@@ -1298,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	struct dma_fence *fence;
 	long r;
 
-	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+	r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
 	if (r)
 		goto error;
 
+	r = dma_fence_wait_timeout(fence, false, timeout);
+	dma_fence_put(fence);
+	if (r == 0)
+		r = -ETIMEDOUT;
+	if (r < 0)
+		goto error;
+
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r)
 		goto error;
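In brief, what the change does: the IB ring test exercises amdgpu_uvd_get_create_msg() and amdgpu_uvd_get_destroy_msg(), which previously allocated a fresh 1 KiB GTT buffer for every message. That test also runs during GPU reset and recovery, where allocating memory is exactly what must be avoided, so the patch allocates one 128 KiB message BO (adev->uvd.ib_bo) up front in amdgpu_uvd_sw_init(), reuses it for all direct submissions, and frees it in amdgpu_uvd_sw_fini(); only the non-direct destroy path still allocates a transient BO through the new helper. A minimal sketch of that allocate-once/reuse/free-at-fini pattern, assuming the in-file helpers above (the uvd_example_* wrappers are hypothetical, for illustration only):

#include "amdgpu.h"

/* Hypothetical wrappers; not part of the driver. */
static int uvd_example_init(struct amdgpu_device *adev)
{
	/* One-time allocation, done while allocating memory is still safe. */
	return amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10,
					       &adev->uvd.ib_bo);
}

static void uvd_example_use(struct amdgpu_device *adev)
{
	/* Reset/recovery path: reuse the existing mapping, allocate nothing. */
	uint32_t *msg = amdgpu_bo_kptr(adev->uvd.ib_bo);

	msg[0] = cpu_to_le32(0x00000de4);	/* first dword, as in the canned msgs */
}

static void uvd_example_fini(struct amdgpu_device *adev)
{
	void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);

	/* Unpins, unmaps and unrefs the BO, then clears the pointer. */
	amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
}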
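One subtlety in the reworked amdgpu_uvd_ring_test_ib() above: dma_fence_wait_timeout() returns the remaining timeout in jiffies when the fence signals, 0 when the wait times out, and a negative errno on failure, so a bare 'if (r)' would misread success as an error. That is why the patch maps the zero case to -ETIMEDOUT before testing r < 0. The same convention, condensed into a hypothetical helper:

#include <linux/dma-fence.h>
#include <linux/errno.h>

/* uvd_example_wait() is illustrative only, not a driver function. */
static long uvd_example_wait(struct dma_fence *fence, long timeout)
{
	/* false = non-interruptible wait, matching the IB test. */
	long r = dma_fence_wait_timeout(fence, false, timeout);

	dma_fence_put(fence);
	if (r == 0)	/* timeout elapsed and the fence never signaled */
		return -ETIMEDOUT;
	if (r < 0)	/* error reported by the wait itself */
		return r;
	return 0;	/* signaled, with r jiffies of the timeout left */
}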