| field | value | date |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2018-11-19 04:07:52 +0300 |
| committer | Dave Airlie <airlied@redhat.com> | 2018-11-19 04:07:52 +0300 |
| commit | 9235dd441af43599b9cdcce599a3da4083fcad3c (patch) | |
| tree | 5f8a79cc2d378f05e807c6c5e388394b8e86319d /drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | |
| parent | d7563c55ef9fc1fd2301b8708b3c1f53509d6745 (diff) | |
| parent | 36b486bc682114a2f1001cbf1a87f21ae381bfc1 (diff) | |
| download | linux-9235dd441af43599b9cdcce599a3da4083fcad3c.tar.xz | |
Merge branch 'drm-next-4.21' of git://people.freedesktop.org/~agd5f/linux into drm-next
New features for 4.21:
amdgpu:
- Support for SDMA paging queue on vega
- Put compute EOP buffers into vram for better performance
- Share more code with amdkfd
- Support for scanout with DCC on gfx9
- Initial kerneldoc for DC
- Updated SMU firmware support for gfx8 chips
- Rework CSA handling for eventual support for preemption
- XGMI PSP support
- Clean up RLC handling
- Enable GPU reset by default on VI, SOC15 dGPUs
- Ring and IB test cleanups
amdkfd:
- Share more code with amdgpu
ttm:
- Move global init out of the drivers
scheduler:
- Track if schedulers are ready for work
- Timeout/fault handling changes to facilitate GPU recovery
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181114165113.3751-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 140 |
1 file changed, 42 insertions(+), 98 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2f358aa0597..cfee74732edb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
 
 #include "amdgpu.h"
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
-	addr -= AMDGPU_VA_RESERVED_SIZE;
-	addr = amdgpu_gmc_sign_extend(addr);
-
-	return addr;
-}
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
-	int r;
-	void *ptr;
-
-	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
-				&adev->virt.csa_vmid0_addr, &ptr);
-	if (r)
-		return r;
-
-	memset(ptr, 0, AMDGPU_CSA_SIZE);
-	return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-						&adev->virt.csa_vmid0_addr,
-						NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va)
-{
-	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &adev->virt.csa_obj->tbo;
-	csa_tv.shared = true;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
-	}
-
-	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
-		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
-	}
-
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				AMDGPU_CSA_SIZE);
-	if (r) {
-		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-			     AMDGPU_PTE_EXECUTABLE);
-
-	if (r) {
-		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
-}
-
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
 	/* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_read;
 
-	if (in_interrupt())
-		might_sleep();
-
+	might_sleep();
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_write;
 
-	if (in_interrupt())
-		might_sleep();
-
+	might_sleep();
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -228,6 +132,46 @@ failed_kiq_write:
 	pr_err("failed to write reg:%x\n", reg);
 }
 
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+					    ref, mask);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for IRQ context */
+	if (r < 1 && in_interrupt())
+		goto failed_kiq;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq;
+
+	return;
+
+failed_kiq:
+	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @amdgpu:	amdgpu device.
```
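The last hunk adds amdgpu_virt_kiq_reg_write_reg_wait(), which packages a register write plus a polled wait on a second register into a single KIQ ring submission, so an SR-IOV guest can perform the sequence without touching the blocked MMIO path. Below is a minimal sketch of how a caller might drive the new helper; the wrapper function name, the req/ack register parameters, and the per-VMID bit encoding are illustrative assumptions, not code from this patch.

```c
/* Sketch only: example_invalidate_via_kiq(), req_reg, ack_reg and the
 * per-VMID bit encoding are assumptions for illustration; only
 * amdgpu_virt_kiq_reg_write_reg_wait() comes from the hunk above.
 */
static void example_invalidate_via_kiq(struct amdgpu_device *adev,
				       uint32_t req_reg, uint32_t ack_reg,
				       unsigned int vmid)
{
	uint32_t bit = 1u << vmid;

	/* Write 'bit' to req_reg, then wait until (ack_reg & bit) == bit.
	 * Both the write and the wait are emitted on the KIQ ring in one
	 * submission, so the sequence still works when direct MMIO access
	 * is blocked for the SR-IOV guest. */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg, bit, bit);
}
```

Compared with issuing amdgpu_virt_kiq_wreg() and then polling via amdgpu_virt_kiq_rreg(), emitting the write and the wait together under one ring_lock allocation keeps the two operations ordered on the KIQ and needs only a single polled fence.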
