author | Alex Deucher <alexander.deucher@amd.com> | 2015-02-24 20:38:42 +0300 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-03-19 19:26:47 +0300 |
commit | f6b355dda471879a69ddfa83d2db673b61da6c07 (patch) | |
tree | 8dae7426887979a26fca5ba34d468922857c79bc /drivers/gpu/drm/radeon/cik.c | |
parent | 47f2467fffc4e1a070b141bc9d1319dc2c0acea5 (diff) | |
download | linux-f6b355dda471879a69ddfa83d2db673b61da6c07.tar.xz |
radeon/cik: add support for short HPD irqs
This adds support for processing short HPD irqs (HPD RX interrupts, e.g. DisplayPort short pulses) on CIK GPUs.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
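The patch only raises a `queue_dp` flag in the interrupt handler and defers the real work with `schedule_work(&rdev->dp_work)`; the worker that actually services the short pulse lives outside cik.c (it is added in radeon_irq_kms.c by a companion patch in the series) and is not shown in this diff. As a rough sketch of what that consumer side looks like — assuming it mirrors the driver's existing `hotplug_work` pattern; the function name and the connector walk below are illustrative, not taken from this commit:

```c
#include <drm/drmP.h>
#include "radeon.h"

/* Illustrative sketch only: after a short HPD pulse, walk every connector so
 * the DP connector code can re-check link status / the sink IRQ vector. The
 * real handler is added in radeon_irq_kms.c by a companion patch and may
 * differ in detail.
 */
static void radeon_dp_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  dp_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* assumed: same connector walk as the long-pulse hotplug_work handler */
	list_for_each_entry(connector, &mode_config->connector_list, head)
		radeon_connector_hotplug(connector);
}
```

For the `schedule_work()` call in the diff to be safe, `rdev->dp_work` also has to be initialized before interrupts are enabled, e.g. with `INIT_WORK(&rdev->dp_work, radeon_dp_work_func)` next to the existing `INIT_WORK(&rdev->hotplug_work, ...)` in `radeon_irq_kms_init()` — again an assumption about the companion patch, not something this diff shows.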
Diffstat (limited to 'drivers/gpu/drm/radeon/cik.c')
-rw-r--r-- | drivers/gpu/drm/radeon/cik.c | 99 |
1 file changed, 87 insertions, 12 deletions
```diff
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e1db25e48688..28faea9996f9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7427,12 +7427,12 @@ int cik_irq_set(struct radeon_device *rdev)
 		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
 
-	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
-	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
-	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
-	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
-	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
-	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
 
 	dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
 	dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -7519,27 +7519,27 @@ int cik_irq_set(struct radeon_device *rdev)
 	}
 	if (rdev->irq.hpd[0]) {
 		DRM_DEBUG("cik_irq_set: hpd 1\n");
-		hpd1 |= DC_HPDx_INT_EN;
+		hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[1]) {
 		DRM_DEBUG("cik_irq_set: hpd 2\n");
-		hpd2 |= DC_HPDx_INT_EN;
+		hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[2]) {
 		DRM_DEBUG("cik_irq_set: hpd 3\n");
-		hpd3 |= DC_HPDx_INT_EN;
+		hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[3]) {
 		DRM_DEBUG("cik_irq_set: hpd 4\n");
-		hpd4 |= DC_HPDx_INT_EN;
+		hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[4]) {
 		DRM_DEBUG("cik_irq_set: hpd 5\n");
-		hpd5 |= DC_HPDx_INT_EN;
+		hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[5]) {
 		DRM_DEBUG("cik_irq_set: hpd 6\n");
-		hpd6 |= DC_HPDx_INT_EN;
+		hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 
 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -7711,6 +7711,36 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
+	if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
 }
 
 /**
@@ -7836,6 +7866,7 @@ int cik_irq_process(struct radeon_device *rdev)
 	u8 me_id, pipe_id, queue_id;
 	u32 ring_index;
 	bool queue_hotplug = false;
+	bool queue_dp = false;
 	bool queue_reset = false;
 	u32 addr, status, mc_client;
 	bool queue_thermal = false;
@@ -8081,6 +8112,48 @@ restart_ih:
 					DRM_DEBUG("IH: HPD6\n");
 				}
 				break;
+			case 6:
+				if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+					rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+					queue_dp = true;
+					DRM_DEBUG("IH: HPD_RX 1\n");
+				}
+				break;
+			case 7:
+				if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+					rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+					queue_dp = true;
+					DRM_DEBUG("IH: HPD_RX 2\n");
+				}
+				break;
+			case 8:
+				if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+					queue_dp = true;
+					DRM_DEBUG("IH: HPD_RX 3\n");
+				}
+				break;
+			case 9:
+				if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+					queue_dp = true;
+					DRM_DEBUG("IH: HPD_RX 4\n");
+				}
+				break;
+			case 10:
+				if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+					queue_dp = true;
+					DRM_DEBUG("IH: HPD_RX 5\n");
+				}
+				break;
+			case 11:
+				if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+					queue_dp = true;
+					DRM_DEBUG("IH: HPD_RX 6\n");
+				}
+				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
@@ -8289,6 +8362,8 @@ restart_ih:
 		rptr &= rdev->ih.ptr_mask;
 		WREG32(IH_RB_RPTR, rptr);
 	}
+	if (queue_dp)
+		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (queue_reset) {
```
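A note on the new `cik_irq_ack()` hunk: the six RX-ack blocks are copies that differ only in the status word and the per-pin `DC_HPDn_INT_CONTROL` register, and the HPD6 block as committed reads `DC_HPD5_INT_CONTROL` before writing `DC_HPD6_INT_CONTROL`, which looks like a copy-paste slip. A hypothetical table-driven form (not part of this patch; the helper name is made up, the register and bit names are the ones the diff already uses) keeps the per-pin pairing correct by construction:

```c
/* Hypothetical refactor sketch, not from the driver: ack all pending HPD RX
 * (short pulse) interrupts in a loop so each pin reads and writes its own
 * DC_HPDn_INT_CONTROL register.
 */
static void cik_irq_ack_hpd_rx(struct radeon_device *rdev)
{
	static const u32 hpd_int_control[6] = {
		DC_HPD1_INT_CONTROL, DC_HPD2_INT_CONTROL, DC_HPD3_INT_CONTROL,
		DC_HPD4_INT_CONTROL, DC_HPD5_INT_CONTROL, DC_HPD6_INT_CONTROL,
	};
	static const u32 hpd_rx_int[6] = {
		DC_HPD1_RX_INTERRUPT, DC_HPD2_RX_INTERRUPT, DC_HPD3_RX_INTERRUPT,
		DC_HPD4_RX_INTERRUPT, DC_HPD5_RX_INTERRUPT, DC_HPD6_RX_INTERRUPT,
	};
	/* status words saved by cik_irq_ack(), in pin order */
	const u32 disp_int[6] = {
		rdev->irq.stat_regs.cik.disp_int,
		rdev->irq.stat_regs.cik.disp_int_cont,
		rdev->irq.stat_regs.cik.disp_int_cont2,
		rdev->irq.stat_regs.cik.disp_int_cont3,
		rdev->irq.stat_regs.cik.disp_int_cont4,
		rdev->irq.stat_regs.cik.disp_int_cont5,
	};
	int i;

	for (i = 0; i < 6; i++) {
		if (disp_int[i] & hpd_rx_int[i]) {
			u32 tmp = RREG32(hpd_int_control[i]);

			tmp |= DC_HPDx_RX_INT_ACK;
			WREG32(hpd_int_control[i], tmp);
		}
	}
}
```

Whether the churn is worth it is debatable, since the open-coded form matches the surrounding non-RX ack code, but the loop makes register-pairing mistakes of the copy-paste style impossible.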