author     Alex Deucher <alexander.deucher@amd.com>   2012-12-05 00:25:59 +0400
committer  Alex Deucher <alexander.deucher@amd.com>   2012-12-11 01:53:29 +0400
commit     233d1ad59a2895e348259bb6f9f4528a75ea7752 (patch)
tree       6539a16accaf1d0293b0a236db2cfdc27c22e083 /drivers/gpu
parent     4d75658bffea78f0c6f82fd46df1ec983ccacdf0 (diff)
download   linux-233d1ad59a2895e348259bb6f9f4528a75ea7752.tar.xz
drm/radeon/kms: Add initial support for async DMA on evergreen
Pretty similar to 6xx/7xx, except that the count field in the packet
header and the max IB size have both increased.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
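The "count field" remark refers to the packet-header encoding this patch adds to evergreend.h: the evergreen DMA_PACKET macro masks the count with 0xFFFFF (20 bits), whereas the 6xx/7xx header carried a 16-bit count. Below is a minimal sketch contrasting the two layouts; the R6XX_DMA_PACKET macro is an assumption reconstructed for comparison, not part of this patch:

```c
#include <stdio.h>

/* Evergreen DMA packet header, as defined in the evergreend.h hunk of
 * this patch: 4-bit command, tiling bit, swap bit, 20-bit count. */
#define EG_DMA_PACKET(cmd, t, s, n)   ((((cmd) & 0xF) << 28) | \
                                       (((t) & 0x1) << 23) |   \
                                       (((s) & 0x1) << 22) |   \
                                       (((n) & 0xFFFFF) << 0))

/* Assumed 6xx/7xx layout for comparison: same fields, 16-bit count. */
#define R6XX_DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
                                       (((t) & 0x1) << 23) |   \
                                       (((s) & 0x1) << 22) |   \
                                       (((n) & 0xFFFF) << 0))

int main(void)
{
	unsigned int n = 0x12345;	/* a count too large for 16 bits */

	/* 0x3 is DMA_PACKET_COPY in the evergreend.h hunk below. */
	printf("evergreen COPY header: 0x%08x (count 0x%05x)\n",
	       EG_DMA_PACKET(0x3, 0, 0, n), n & 0xFFFFF);
	printf("6xx/7xx COPY header:   0x%08x (count 0x%04x, truncated)\n",
	       R6XX_DMA_PACKET(0x3, 0, 0, n), n & 0xFFFF);
	return 0;
}
```

The wider count field is also why evergreen_copy_dma in this patch can move up to 0xFFFFF dwords per COPY packet.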
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c   | 181
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h  |  29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c |  39
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h |   8
4 files changed, 248 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 78de2e4097b5..68206df3d5d2 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2034,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
 	tmp = gb_addr_config & NUM_PIPES_MASK;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2405,6 +2406,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 		cayman_cp_int_cntl_setup(rdev, 2, 0);
 	} else
 		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2457,6 +2460,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+	u32 dma_cntl;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2488,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2512,11 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		}
 	}
 
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2602,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
 	} else
 		WREG32(CP_INT_CNTL, cp_int_cntl);
+
+	WREG32(DMA_CNTL, dma_cntl);
+
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3126,6 +3140,10 @@ restart_ih:
 			} else
 				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
@@ -3154,6 +3172,143 @@ restart_ih:
 	return IRQ_HANDLED;
 }
 
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+	/* flush HDP */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+	radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFFF)
+			cur_size_in_dw = 0xFFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3217,6 +3372,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -3231,12 +3392,23 @@ static int evergreen_startup(struct radeon_device *rdev)
 			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = evergreen_cp_load_microcode(rdev);
 	if (r)
 		return r;
 	r = evergreen_cp_resume(rdev);
 	if (r)
 		return r;
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -3283,11 +3455,9 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
 	r600_audio_fini(rdev);
 	r700_cp_stop(rdev);
-	ring->ready = false;
+	r600_dma_stop(rdev);
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
@@ -3364,6 +3534,9 @@ int evergreen_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3376,6 +3549,7 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
@@ -3403,6 +3577,7 @@ void evergreen_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index cae7ab4219ef..92d1f4521caf 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -905,6 +905,35 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                 0xD0B8
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)        ((((cmd) & 0xF) << 28) |        \
+                                         (((t) & 0x1) << 23) |          \
+                                         (((s) & 0x1) << 22) |          \
+                                         (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE                                  0x2
+#define DMA_PACKET_COPY                                   0x3
+#define DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define DMA_PACKET_SEMAPHORE                              0x5
+#define DMA_PACKET_FENCE                                  0x6
+#define DMA_PACKET_TRAP                                   0x7
+#define DMA_PACKET_SRBM_WRITE                             0x9
+#define DMA_PACKET_CONSTANT_FILL                          0xd
+#define DMA_PACKET_NOP                                    0xf
+
 /* PCIE link stuff */
 #define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
 #define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 3cf9b29fb53f..1dd8d927e035 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1199,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -1215,8 +1224,8 @@ static struct radeon_asic evergreen_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
 		.copy = &r600_copy_blit,
 		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
@@ -1275,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
 		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -1290,8 +1308,8 @@ static struct radeon_asic sumo_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
 		.copy = &r600_copy_blit,
 		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
@@ -1349,6 +1367,15 @@ static struct radeon_asic btc_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -1365,8 +1392,8 @@ static struct radeon_asic btc_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
 		.copy = &r600_copy_blit,
 		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70a5b1f0e43e..7a2705d0a4d9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -441,6 +441,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
 void evergreen_disable_interrupt_state(struct radeon_device *rdev);
 int evergreen_blit_init(struct radeon_device *rdev);
 int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+                                   struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+                                   struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+                       uint64_t src_offset, uint64_t dst_offset,
+                       unsigned num_gpu_pages,
+                       struct radeon_fence **fence);
 
 /*
  * cayman
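A note on the NOP padding in evergreen_dma_ring_ib_execute above: the DMA_PACKET_INDIRECT_BUFFER packet is three dwords (header, IB base address, length/upper address) and, per the comment in the patch, must end on an 8-dword boundary, so it has to start at a write-pointer offset of 5 mod 8. The sketch below, with a hypothetical nop_padding helper, just walks that arithmetic:

```c
#include <stdio.h>

/* Number of NOP dwords needed before a 3-dword INDIRECT_BUFFER packet
 * so that the packet ends on an 8-dword boundary, mirroring the
 * "while ((ring->wptr & 7) != 5)" loop in the patch. */
static unsigned int nop_padding(unsigned int wptr)
{
	unsigned int pad = 0;

	while (((wptr + pad) & 7) != 5)
		pad++;
	return pad;
}

int main(void)
{
	for (unsigned int wptr = 0; wptr < 8; wptr++) {
		unsigned int pad = nop_padding(wptr);

		/* (wptr + pad) is 5 mod 8, so after the 3-dword packet the
		 * write pointer lands back on an 8-dword boundary. */
		printf("wptr %u -> %u NOP(s), packet ends at offset %u mod 8\n",
		       wptr, pad, (wptr + pad + 3) & 7);
	}
	return 0;
}
```

The ring-space request in evergreen_copy_dma (num_loops * 5 + 11) lines up with the same packet sizes: five dwords per COPY packet, with the remaining eleven presumably budgeting for the optional three-dword semaphore sync plus the eight-dword fence/trap/HDP-flush sequence emitted at the end.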