Diffstat (limited to 'drivers/gpu/drm/radeon')
29 files changed, 681 insertions, 186 deletions
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 967d193d36d0..76c30f2da3fb 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: radeon_crtc->enabled = true; - /* adjust pm to dpms changes BEFORE enabling crtcs */ - radeon_pm_compute_clocks(rdev); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); @@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* adjust pm to dpms changes AFTER disabling crtcs */ - radeon_pm_compute_clocks(rdev); break; } + /* adjust pm to dpms */ + radeon_pm_compute_clocks(rdev); } static void @@ -1208,27 +1206,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, /* Set NUM_BANKS. */ if (rdev->family >= CHIP_TAHITI) { - unsigned tileb, index, num_banks, tile_split_bytes; + unsigned index, num_banks; - /* Calculate the macrotile mode index. */ - tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; - tileb = min(tile_split_bytes, tileb); + if (rdev->family >= CHIP_BONAIRE) { + unsigned tileb, tile_split_bytes; - for (index = 0; tileb > 64; index++) { - tileb >>= 1; - } + /* Calculate the macrotile mode index. */ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); - if (index >= 16) { - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); - return -EINVAL; - } + for (index = 0; tileb > 64; index++) + tileb >>= 1; + + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } - if (rdev->family >= CHIP_BONAIRE) num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; - else + } else { + switch (target_fb->bits_per_pixel) { + case 8: + index = 10; + break; + case 16: + index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; + break; + default: + case 32: + index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; + break; + } + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; + } + fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { /* NI and older. 
*/ @@ -1751,8 +1765,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) } /* otherwise, pick one of the plls */ if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI)) { - /* KB/KV has PPLL1 and PPLL2 */ + (rdev->family == CHIP_KABINI) || + (rdev->family == CHIP_MULLINS)) { + /* KB/KV/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1916,6 +1931,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) is_tvcv = true; + if (!radeon_crtc->adjusted_clock) + return -EINVAL; + atombios_crtc_set_pll(crtc, adjusted_mode); if (ASIC_IS_DCE4(rdev)) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index a54c44181a0f..c5b1f2da3954 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -390,11 +390,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) return; - if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3)) + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); - if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3)) + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); } @@ -443,21 +443,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, - DP_EDP_CONFIGURATION_CAP, &tmp); - if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; - else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || - (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; - else - panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; + else + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + } } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { /* eDP */ - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, - DP_EDP_CONFIGURATION_CAP, &tmp); - if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + } } return panel_mode; @@ -833,11 +835,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder, else dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp); - if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) - dp_info.tp3_supported = true; - else + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) + == 1) { + if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) + dp_info.tp3_supported = true; + else + dp_info.tp3_supported = false; + } else { dp_info.tp3_supported = false; + } memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); dp_info.rdev = rdev; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c 
index a5181404f130..69a00d64716e 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin"); MODULE_FIRMWARE("radeon/KABINI_mec.bin"); MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); +MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); +MODULE_FIRMWARE("radeon/MULLINS_me.bin"); +MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); +MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); +MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); +MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); @@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] = 0xd80c, 0xff000ff0, 0x00000100 }; +static const u32 godavari_golden_registers[] = +{ + 0x55e4, 0xff607fff, 0xfc000100, + 0x6ed8, 0x00010101, 0x00010000, + 0x9830, 0xffffffff, 0x00000000, + 0x98302, 0xf00fffff, 0x00000400, + 0x6130, 0xffffffff, 0x00010000, + 0x5bb0, 0x000000f0, 0x00000070, + 0x5bc0, 0xf0311fff, 0x80300000, + 0x98f8, 0x73773777, 0x12010001, + 0x98fc, 0xffffffff, 0x00000010, + 0x8030, 0x00001f0f, 0x0000100a, + 0x2f48, 0x73773777, 0x12010001, + 0x2408, 0x000fffff, 0x000c007f, + 0x8a14, 0xf000003f, 0x00000007, + 0x8b24, 0xffffffff, 0x00ff0fff, + 0x30a04, 0x0000ff0f, 0x00000000, + 0x28a4c, 0x07ffffff, 0x06000000, + 0x4d8, 0x00000fff, 0x00000100, + 0xd014, 0x00010000, 0x00810001, + 0xd814, 0x00010000, 0x00810001, + 0x3e78, 0x00000001, 0x00000002, + 0xc768, 0x00000008, 0x00000008, + 0xc770, 0x00000f00, 0x00000800, + 0xc774, 0x00000f00, 0x00000800, + 0xc798, 0x00ffffff, 0x00ff7fbf, + 0xc79c, 0x00ffffff, 0x00ff7faf, + 0x8c00, 0x000000ff, 0x00000001, + 0x214f8, 0x01ff01ff, 0x00000002, + 0x21498, 0x007ff800, 0x00200000, + 0x2015c, 0xffffffff, 0x00000f40, + 0x88c4, 0x001f3ae3, 0x00000082, + 0x88d4, 0x0000001f, 0x00000010, + 0x30934, 0xffffffff, 0x00000000 +}; + + static void cik_init_golden_registers(struct radeon_device *rdev) { switch (rdev->family) { @@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev) kalindi_golden_spm_registers, (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); break; + case CHIP_MULLINS: + radeon_program_register_sequence(rdev, + kalindi_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + radeon_program_register_sequence(rdev, + godavari_golden_registers, + (const u32)ARRAY_SIZE(godavari_golden_registers)); + radeon_program_register_sequence(rdev, + kalindi_golden_common_registers, + (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); + radeon_program_register_sequence(rdev, + kalindi_golden_spm_registers, + (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); + break; case CHIP_KAVERI: radeon_program_register_sequence(rdev, spectre_mgcg_cgcg_init, @@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev) rlc_req_size = KB_RLC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; break; + case CHIP_MULLINS: + chip_name = "MULLINS"; + pfp_req_size = CIK_PFP_UCODE_SIZE * 4; + me_req_size = CIK_ME_UCODE_SIZE * 4; + ce_req_size = CIK_CE_UCODE_SIZE * 4; + mec_req_size = CIK_MEC_UCODE_SIZE * 4; + rlc_req_size = ML_RLC_UCODE_SIZE * 4; + sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + break; default: BUG(); } @@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev) gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_KABINI: + case CHIP_MULLINS: default: rdev->config.cik.max_shader_engines = 1; rdev->config.cik.max_tile_pipes = 
2; @@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -5803,6 +5871,9 @@ static int cik_rlc_resume(struct radeon_device *rdev) case CHIP_KABINI: size = KB_RLC_UCODE_SIZE; break; + case CHIP_MULLINS: + size = ML_RLC_UCODE_SIZE; + break; } cik_rlc_stop(rdev); @@ -6551,6 +6622,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_KABINI: + case CHIP_MULLINS: buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ buffer[count++] = cpu_to_le32(0x00000000); break; @@ -6696,6 +6768,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } + /* pflip */ + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } /* dac hotplug */ WREG32(DAC_AUTODETECT_INT_CONTROL, 0); @@ -7052,6 +7137,25 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -7088,6 +7192,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev) rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); + rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC1_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC5_REGISTER_OFFSET); + } + + if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, + 
GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) @@ -7098,6 +7225,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); if (rdev->num_crtc >= 4) { + if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) @@ -7109,6 +7242,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) } if (rdev->num_crtc >= 6) { + if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) @@ -7460,6 +7599,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 3c2407bad1f0..1347162ca1a4 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 0b27ea08c299..ae88660f34ea 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -889,6 +889,15 @@ # define DC_HPD6_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 +/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ +#define GRPH_INT_STATUS 0x6858 +# define GRPH_PFLIP_INT_OCCURRED (1 << 0) +# define GRPH_PFLIP_INT_CLEAR (1 << 8) +/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ +#define GRPH_INT_CONTROL 0x685c +# define GRPH_PFLIP_INT_MASK (1 << 0) +# define GRPH_PFLIP_INT_TYPE (1 << 8) + #define DAC_AUTODETECT_INT_CONTROL 0x67c8 #define DC_HPD1_INT_STATUS 0x601c diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0318230ef274..653eff814504 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4355,7 +4355,6 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 
0; u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; u32 dma_cntl, dma_cntl1 = 0; u32 thermal_int = 0; @@ -4538,15 +4537,21 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -4935,6 +4940,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 287fe966d7de..478caefe0fef 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 16ec9d56a234..3f6e817d97ee 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev, return 0; } +static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_2bit) +{ + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + if (vid_2bit < vddc_sclk_table->count) + return vddc_sclk_table->entries[vid_2bit].v; + else + return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) + return vid_mapping_table->entries[i].vid_7bit; + } + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; + } +} + +static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_7bit) +{ + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + for (i = 0; i < vddc_sclk_table->count; i++) { + if (vddc_sclk_table->entries[i].v == 
vid_7bit) + return i; + } + return vddc_sclk_table->count - 1; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) + return vid_mapping_table->entries[i].vid_2bit; + } + + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; + } +} + static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, u16 voltage) { @@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, u32 vid_2bit) { struct kv_power_info *pi = kv_get_pi(rdev); - u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, - &pi->sys_info.vid_mapping_table, - vid_2bit); + u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, + &pi->sys_info.vid_mapping_table, + vid_2bit); return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); } @@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) static int kv_unforce_levels(struct radeon_device *rdev) { - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); else return kv_set_enabled_levels(rdev); @@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) struct radeon_uvd_clock_voltage_dependency_table *table = &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; int ret; + u32 mask; if (!gate) { - if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) + if (table->count) pi->uvd_boot_level = table->count - 1; else pi->uvd_boot_level = 0; + if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { + mask = 1 << pi->uvd_boot_level; + } else { + mask = 0x1f; + } + ret = kv_copy_bytes_to_smc(rdev, pi->dpm_table_start + offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), @@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) if (ret) return ret; - if (!pi->caps_uvd_dpm || - pi->caps_stable_p_state) - kv_send_msg_to_smc_with_parameter(rdev, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (1 << pi->uvd_boot_level)); + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + mask); } return kv_enable_uvd_dpm(rdev, !gate); @@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) if (pi->acp_power_gated == gate) return; - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return; pi->acp_power_gated = gate; @@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) } } - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { if (pi->enable_dpm) { kv_set_valid_clock_range(rdev, new_ps); kv_update_dfs_bypass_settings(rdev, new_ps); @@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) return ret; } kv_update_sclk_t(rdev); + if (rdev->family == CHIP_MULLINS) + kv_enable_nb_dpm(rdev); } } else { if (pi->enable_dpm) { @@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { kv_force_lowest_valid(rdev); kv_init_graphics_levels(rdev); kv_program_bootup_state(rdev); @@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev, static void kv_patch_voltage_values(struct radeon_device *rdev) { int i; - struct radeon_uvd_clock_voltage_dependency_table *table = + struct 
radeon_uvd_clock_voltage_dependency_table *uvd_table = &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct radeon_vce_clock_voltage_dependency_table *vce_table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table *samu_table = + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table *acp_table = + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - if (table->count) { - for (i = 0; i < table->count; i++) - table->entries[i].v = + if (uvd_table->count) { + for (i = 0; i < uvd_table->count; i++) + uvd_table->entries[i].v = kv_convert_8bit_index_to_voltage(rdev, - table->entries[i].v); + uvd_table->entries[i].v); + } + + if (vce_table->count) { + for (i = 0; i < vce_table->count; i++) + vce_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + vce_table->entries[i].v); + } + + if (samu_table->count) { + for (i = 0; i < samu_table->count; i++) + samu_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + samu_table->entries[i].v); + } + + if (acp_table->count) { + for (i = 0; i < acp_table->count; i++) + acp_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + acp_table->entries[i].v); } } @@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev) break; } - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); else return kv_set_enabled_level(rdev, i); @@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) break; } - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); else return kv_set_enabled_level(rdev, i); @@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, else pi->battery_state = false; - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { ps->dpm0_pg_nb_ps_lo = 0x1; ps->dpm0_pg_nb_ps_hi = 0x0; ps->dpmx_nb_ps_lo = 0x1; @@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) if (pi->lowest_valid > pi->highest_valid) return -EINVAL; - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { pi->graphics_level[i].GnbSlow = 1; pi->graphics_level[i].ForceNbPs1 = 0; @@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev) break; kv_set_divider_value(rdev, i, table->entries[i].clk); - vid_2bit = sumo_convert_vid7_to_vid2(rdev, - &pi->sys_info.vid_mapping_table, - table->entries[i].v); + vid_2bit = kv_convert_vid7_to_vid2(rdev, + &pi->sys_info.vid_mapping_table, + table->entries[i].v); kv_set_vid(rdev, i, vid_2bit); kv_set_at(rdev, i, pi->at[i]); kv_dpm_power_level_enabled_for_throttle(rdev, i, true); @@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev, struct kv_power_info *pi = kv_get_pi(rdev); u32 nbdpmconfig1; - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return; if (pi->sys_info.nb_dpm_enable) { @@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev) pi->sram_end = SMC_RAM_END; - if (rdev->family == CHIP_KABINI) - pi->high_voltage_t = 4001; - pi->enable_nb_dpm = true; 
pi->caps_power_containment = true; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 436e55092e9d..c75881223d18 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 hdmi0, hdmi1; - u32 d1grph = 0, d2grph = 0; u32 dma_cntl; u32 thermal_int = 0; @@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DMA_CNTL, dma_cntl); WREG32(DxMODE_INT_MASK, mode_int); - WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); - WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); + WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); + WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); WREG32(GRBM_INT_CNTL, grbm_int_cntl); if (ASIC_IS_DCE3(rdev)) { WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -3918,6 +3918,14 @@ restart_ih: break; } break; + case 9: /* D1 pflip */ + DRM_DEBUG("IH: D1 flip\n"); + radeon_crtc_handle_flip(rdev, 0); + break; + case 11: /* D2 pflip */ + DRM_DEBUG("IH: D2 flip\n"); + radeon_crtc_handle_flip(rdev, 1); + break; case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 53fcb28f5578..4969cef44a19 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index dd4da88b3ab1..7501ba318c67 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -732,6 +732,12 @@ struct cik_irq_stat_regs { u32 disp_int_cont4; u32 disp_int_cont5; u32 disp_int_cont6; + u32 d1grph_int; + u32 d2grph_int; + u32 d3grph_int; + u32 d4grph_int; + u32 d5grph_int; + u32 d6grph_int; }; union radeon_irq_stat_regs { @@ -1647,6 +1653,7 @@ struct radeon_vce { unsigned fb_version; atomic_t handles[RADEON_MAX_VCE_HANDLES]; struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; + unsigned img_size[RADEON_MAX_VCE_HANDLES]; struct delayed_work idle_work; }; @@ -1660,7 +1667,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence); void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); void radeon_vce_note_usage(struct radeon_device *rdev); -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); int radeon_vce_cs_parse(struct radeon_cs_parser *p); bool radeon_vce_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, @@ -2644,7 +2651,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) -#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ + (rdev->family == CHIP_MULLINS)) #define 
ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ (rdev->ddev->pdev->device == 0x6850) || \ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index d8e1587d89cf..34ea53d980a1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2029,8 +2029,8 @@ static struct radeon_asic ci_asic = { .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &cik_copy_dma, - .copy_ring_index = R600_RING_TYPE_DMA_INDEX, + .copy = &cik_copy_cpdma, + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, .surface = { .set_reg = r600_set_surface_reg, @@ -2494,6 +2494,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: rdev->asic = &kv_asic; /* set num crtcs */ if (rdev->family == CHIP_KAVERI) { diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a5317..9ab30976287d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } } + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + if (!found) return false; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 2b6e0ebcc13a..41ecf8a60611 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) uint32_t domain = r->write_domain ? 
r->write_domain : r->read_domains; + if (domain & RADEON_GEM_DOMAIN_CPU) { + DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " + "for command submission\n"); + return -EINVAL; + } + p->relocs[i].domain = domain; if (domain == RADEON_GEM_DOMAIN_VRAM) domain |= RADEON_GEM_DOMAIN_GTT; @@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return -EINVAL; /* we only support VM on some SI+ rings */ - if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { - DRM_ERROR("Ring %d requires VM!\n", p->ring); - return -EINVAL; + if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { + if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { + DRM_ERROR("Ring %d requires VM!\n", p->ring); + return -EINVAL; + } + } else { + if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { + DRM_ERROR("VM not supported on ring %d!\n", + p->ring); + return -EINVAL; + } } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 9aa1afd1786e..31565de1116c 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = { "KAVERI", "KABINI", "HAWAII", + "MULLINS", "LAST", }; @@ -1533,11 +1534,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) radeon_restore_bios_scratch_regs(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); - console_unlock(); - } - /* init dig PHYs, disp eng pll */ if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); @@ -1562,6 +1558,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + /* set the power state here in case we are a PX system or headless */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) + radeon_pm_compute_clocks(rdev); + + if (fbcon) { + radeon_fbdev_set_suspend(rdev, 0); + console_unlock(); + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e330e762c360..a4e725c6b8c8 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -289,6 +289,10 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) u32 update_pending; int vpos, hpos; + /* can happen during initialization */ + if (radeon_crtc == NULL) + return; + spin_lock_irqsave(&rdev->ddev->event_lock, flags); work = radeon_crtc->flip_work; if (work == NULL) { @@ -872,14 +876,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den, /* make sure nominator is large enough */ if (*nom < nom_min) { - tmp = (nom_min + *nom - 1) / *nom; + tmp = DIV_ROUND_UP(nom_min, *nom); *nom *= tmp; *den *= tmp; } /* make sure the denominator is large enough */ if (*den < den_min) { - tmp = (den_min + *den - 1) / *den; + tmp = DIV_ROUND_UP(den_min, *den); *nom *= tmp; *den *= tmp; } @@ -904,7 +908,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, unsigned *fb_div, unsigned *ref_div) { /* limit reference * post divider to a maximum */ - ref_div_max = min(210 / post_div, ref_div_max); + ref_div_max = max(min(100 / post_div, ref_div_max), 1u); /* get matching reference and feedback divider */ *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); @@ -1039,6 +1043,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, /* this also makes sure that the reference divider is large enough */ avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); 
+ /* avoid high jitter with small fractional dividers */ + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { + fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); + if (fb_div < fb_div_min) { + unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); + fb_div *= tmp; + ref_div *= tmp; + } + } + /* and finally save the result */ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { *fb_div_p = fb_div / 10; diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 9da5da4ffd17..4b7b87f71a63 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -97,6 +97,7 @@ enum radeon_family { CHIP_KAVERI, CHIP_KABINI, CHIP_HAWAII, + CHIP_MULLINS, CHIP_LAST, }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0cc47f12d995..eaaedba04675 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return r; } - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; - } + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } - /* map the ib pool buffer read only into - * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); + /* map the ib pool buffer read only into + * virtual address space */ + bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + RADEON_VM_PAGE_READABLE | + RADEON_VM_PAGE_SNOOPED); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } } - file_priv->driver_priv = fpriv; } @@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, struct radeon_bo_va *bo_va; int r; - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + bo_va = radeon_vm_bo_find(&fpriv->vm, + rdev->ring_tmp_bo.bo); + if (bo_va) + radeon_vm_bo_rmv(rdev, bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } } radeon_vm_fini(rdev, &fpriv->vm); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 95197aa4de4a..2918087e572f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, * into account. We don't want to disallow buffer moves * completely. 
*/ - if (current_domain != RADEON_GEM_DOMAIN_CPU && + if ((lobj->alt_domain & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ @@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; - if (bo->mem.mem_type == TTM_PL_VRAM) { - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) > rdev->mc.visible_vram_size) { - /* hurrah the memory is not visible ! */ - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); - if (unlikely(r != 0)) - return r; - offset = bo->mem.start << PAGE_SHIFT; - /* this should not happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; - } + if (bo->mem.mem_type != TTM_PL_VRAM) + return 0; + + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; + if ((offset + size) <= rdev->mc.visible_vram_size) + return 0; + + /* hurrah the memory is not visible ! */ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, false); + if (unlikely(r == -ENOMEM)) { + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + return ttm_bo_validate(bo, &rbo->placement, false, false); + } else if (unlikely(r != 0)) { + return r; } + + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fac8efe8340..2bdae61c0ac0 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set profile when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) @@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set method when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + count = -EINVAL; + goto fail; + } + /* we don't support the legacy modes with dpm */ if (rdev->pm.pm_method == PM_METHOD_DPM) { count = -EINVAL; @@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? 
"balanced" : "performance"); @@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set dpm state when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); @@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, enum radeon_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = RADEON_DPM_FORCED_LEVEL_LOW; @@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); + struct drm_device *ddev = rdev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (rdev->asic->pm.get_temperature) temp = radeon_get_temperature(rdev); else @@ -1068,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) if (ret) goto dpm_resume_fail; rdev->pm.dpm_enabled = true; - radeon_pm_compute_clocks(rdev); return; dpm_resume_fail: @@ -1300,6 +1335,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: + case CHIP_MULLINS: /* DPM requires the RLC, RV770+ dGPU requires SMC */ if (!rdev->rlc_fw) rdev->pm.pm_method = PM_METHOD_PROFILE; @@ -1613,8 +1649,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_device *ddev = rdev->ddev; - if (rdev->pm.dpm_enabled) { + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (rdev->pm.dpm_enabled) { mutex_lock(&rdev->pm.mutex); if (rdev->asic->dpm.debugfs_print_current_performance_level) radeon_dpm_debugfs_print_current_performance_level(rdev, m); diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index 58d12938c0b8..4e7c3269b183 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h @@ -52,6 +52,7 @@ #define BONAIRE_RLC_UCODE_SIZE 2048 #define KB_RLC_UCODE_SIZE 2560 #define KV_RLC_UCODE_SIZE 2560 +#define ML_RLC_UCODE_SIZE 2560 /* MC */ #define BTC_MC_UCODE_SIZE 6024 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 5748bdaeacce..1b65ae2433cd 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ 
b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev) case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: + case CHIP_MULLINS: fw_name = FIRMWARE_BONAIRE; break; @@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, cmd = radeon_get_ib_value(p, p->idx) >> 1; if (cmd < 0x4) { + if (end <= start) { + DRM_ERROR("invalid reloc offset %X!\n", offset); + return -EINVAL; + } if ((end - start) < buf_sizes[cmd]) { DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index ced53dd03e7c..3971d968af6c 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev) case CHIP_BONAIRE: case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: fw_name = FIRMWARE_BONAIRE; break; @@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, * @p: parser context * @lo: address of lower dword * @hi: address of higher dword + * @size: size of checker for relocation buffer * * Patch relocation inside command stream with real buffer address */ -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, + unsigned size) { struct radeon_cs_chunk *relocs_chunk; - uint64_t offset; + struct radeon_cs_reloc *reloc; + uint64_t start, end, offset; unsigned idx; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; @@ -461,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) return -EINVAL; } - offset += p->relocs_ptr[(idx / 4)]->gpu_offset; + reloc = p->relocs_ptr[(idx / 4)]; + start = reloc->gpu_offset; + end = start + radeon_bo_size(reloc->robj); + start += offset; - p->ib.ptr[lo] = offset & 0xFFFFFFFF; - p->ib.ptr[hi] = offset >> 32; + p->ib.ptr[lo] = start & 0xFFFFFFFF; + p->ib.ptr[hi] = start >> 32; + + if (end <= start) { + DRM_ERROR("invalid reloc offset %llX!\n", offset); + return -EINVAL; + } + if ((end - start) < size) { + DRM_ERROR("buffer to small (%d / %d)!\n", + (unsigned)(end - start), size); + return -EINVAL; + } return 0; } /** + * radeon_vce_validate_handle - validate stream handle + * + * @p: parser context + * @handle: handle to validate + * + * Validates the handle and return the found session index or -EINVAL + * we we don't have another free session index. 
+ */ +int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +{ + unsigned i; + + /* validate the handle */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (atomic_read(&p->rdev->vce.handles[i]) == handle) + return i; + } + + /* handle not found try to alloc a new one */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { + p->rdev->vce.filp[i] = p->filp; + p->rdev->vce.img_size[i] = 0; + return i; + } + } + + DRM_ERROR("No more free VCE handles!\n"); + return -EINVAL; +} + +/** * radeon_vce_cs_parse - parse and validate the command stream * * @p: parser context @@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) */ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { - uint32_t handle = 0; - bool destroy = false; + int session_idx = -1; + bool destroyed = false; + uint32_t tmp, handle = 0; + uint32_t *size = &tmp; int i, r; while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { @@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (destroyed) { + DRM_ERROR("No other command allowed after destroy!\n"); + return -EINVAL; + } + switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); + session_idx = radeon_vce_validate_handle(p, handle); + if (session_idx < 0) + return session_idx; + size = &p->rdev->vce.img_size[session_idx]; break; case 0x00000002: // task info + break; + case 0x01000001: // create + *size = radeon_get_ib_value(p, p->idx + 8) * + radeon_get_ib_value(p, p->idx + 10) * + 8 * 3 / 2; + break; + case 0x04000001: // config extension case 0x04000002: // pic control case 0x04000005: // rate control @@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x03000001: // encode - r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); + r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, + *size); if (r) return r; - r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); + r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, + *size / 3); if (r) return r; break; case 0x02000001: // destroy - destroy = true; + destroyed = true; break; case 0x05000001: // context buffer + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + *size * 2); + if (r) + return r; + break; + case 0x05000004: // video bitstream buffer + tmp = radeon_get_ib_value(p, p->idx + 4); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + tmp); + if (r) + return r; + break; + case 0x05000005: // feedback buffer - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + 4096); if (r) return r; break; @@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (session_idx == -1) { + DRM_ERROR("no session command at start of IB\n"); + return -EINVAL; + } + p->idx += len / 4; } - if (destroy) { + if (destroyed) { /* IB contains a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); - - return 0; - } - - /* create or encode, validate the handle */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) - return 0; } - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { - p->rdev->vce.filp[i] = p->filp; - return 0; - } - } - - DRM_ERROR("No more free VCE handles!\n"); - return -EINVAL; + 
return 0; } /** diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index a128a4fd64b3..a72e9c81805d 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, struct list_head *head) { struct radeon_cs_reloc *list; - unsigned i, idx, size; + unsigned i, idx; - size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); - list = kmalloc(size, GFP_KERNEL); + list = kmalloc_array(vm->max_pde_used + 2, + sizeof(struct radeon_cs_reloc), GFP_KERNEL); if (!list) return NULL; @@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, { static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; - uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); + struct radeon_bo *pd = vm->page_directory; + uint64_t pd_addr = radeon_bo_gpu_offset(pd); uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct radeon_ib ib; @@ -595,7 +596,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ndw = 64; /* assume the worst case */ - ndw += vm->max_pde_used * 12; + ndw += vm->max_pde_used * 16; /* update too big for an IB */ if (ndw > 0xfffff) @@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, incr, R600_PTE_VALID); if (ib.length_dw != 0) { + radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { @@ -767,15 +769,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; + struct radeon_bo *pt = vm->page_tables[pt_idx].bo; unsigned nptes; uint64_t pte; + radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); + if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = RADEON_VM_PTE_COUNT - (addr & mask); - pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo); + pte = radeon_bo_gpu_offset(pt); pte += (addr & mask) * 8; if ((last_pte + 8 * count) != pte) { diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index aca8cbe8a335..bbf2e076ee45 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 5c1c0c795e98..d64ef9115b69 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5784,7 +5784,6 @@ int si_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; u32 thermal_int = 0; @@ -5923,16 +5922,22 @@ int si_irq_set(struct radeon_device *rdev) } if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if 
(rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (!ASIC_IS_NODCE(rdev)) { @@ -6296,6 +6301,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 952166930fb8..9a660f861d2c 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -231,6 +231,7 @@ int si_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index da8f8674a552..fd414d34d885 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -107,8 +107,8 @@ #define SPLL_CHG_STATUS (1 << 1) #define SPLL_CNTL_MODE 0x618 #define SPLL_SW_DIR_CONTROL (1 << 0) -# define SPLL_REFCLK_SEL(x) ((x) << 8) -# define SPLL_REFCLK_SEL_MASK 0xFF00 +# define SPLL_REFCLK_SEL(x) ((x) << 26) +# define SPLL_REFCLK_SEL_MASK (3 << 26) #define CG_SPLL_SPREAD_SPECTRUM 0x620 #define SSEN (1 << 0) diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 0a243f0e5d68..be42c8125203 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev) int r; /* raise clocks while booting up the VCPU */ - radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + radeon_set_uvd_clocks(rdev, 10000, 10000); + else + radeon_set_uvd_clocks(rdev, 53300, 40000); r = uvd_v1_0_start(rdev); if (r) @@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) struct radeon_fence *fence = NULL; int r; - r = radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + r = radeon_set_uvd_clocks(rdev, 10000, 10000); + else + r = radeon_set_uvd_clocks(rdev, 53300, 40000); if (r) { DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); return r; |