Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/si.c')
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 652
 1 file changed, 623 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f2d70a47a3af..e6d2f74a7976 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -26,6 +26,8 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+#include <drm/amdgpu_drm.h>
+
 #include "amdgpu.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_ih.h"
@@ -41,15 +43,19 @@
 #include "si_dma.h"
 #include "dce_v6_0.h"
 #include "si.h"
-#include "dce_virtual.h"
+#include "uvd_v3_1.h"
+#include "amdgpu_vkms.h"
 #include "gca/gfx_6_0_d.h"
 #include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
 #include "gmc/gmc_6_0_d.h"
 #include "dce/dce_6_0_d.h"
 #include "uvd/uvd_4_0_d.h"
 #include "bif/bif_3_0_d.h"
 #include "bif/bif_3_0_sh_mask.h"
+#include "amdgpu_dm.h"
+
 static const u32 tahiti_golden_registers[] =
 {
 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
@@ -901,6 +907,114 @@ static const u32 hainan_mgcg_cgcg_init[] =
 	0x3630, 0xfffffff0, 0x00000100,
 };
 
+/* XXX: update when we support VCE */
+#if 0
+/* tahiti, pitcairn, verde */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
+{
+	{
+		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+		.max_width = 2048,
+		.max_height = 1152,
+		.max_pixels_per_frame = 2048 * 1152,
+		.max_level = 0,
+	},
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+	.codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
+	.codec_array = tahiti_video_codecs_encode_array,
+};
+#else
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+	.codec_count = 0,
+	.codec_array = NULL,
+};
+#endif
+/* oland and hainan don't support encode */
+static const struct amdgpu_video_codecs hainan_video_codecs_encode =
+{
+	.codec_count = 0,
+	.codec_array = NULL,
+};
+
+/* tahiti, pitcairn, verde, oland */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
+{
+	{
+		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+		.max_width = 2048,
+		.max_height = 1152,
+		.max_pixels_per_frame = 2048 * 1152,
+		.max_level = 3,
+	},
+	{
+		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+		.max_width = 2048,
+		.max_height = 1152,
+		.max_pixels_per_frame = 2048 * 1152,
+		.max_level = 5,
+	},
+	{
+		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+		.max_width = 2048,
+		.max_height = 1152,
+		.max_pixels_per_frame = 2048 * 1152,
+		.max_level = 41,
+	},
+	{
+		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+		.max_width = 2048,
+		.max_height = 1152,
+		.max_pixels_per_frame = 2048 * 1152,
+		.max_level = 4,
+	},
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_decode =
+{
+	.codec_count = ARRAY_SIZE(tahiti_video_codecs_decode_array),
+	.codec_array = tahiti_video_codecs_decode_array,
+};
+
+/* hainan doesn't support decode */
+static const struct amdgpu_video_codecs hainan_video_codecs_decode =
+{
+	.codec_count = 0,
+	.codec_array = NULL,
+};
+
+static int si_query_video_codecs(struct amdgpu_device *adev, bool encode,
+				 const struct amdgpu_video_codecs **codecs)
+{
+	switch (adev->asic_type) {
+	case CHIP_VERDE:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		if (encode)
+			*codecs = &tahiti_video_codecs_encode;
+		else
+			*codecs = &tahiti_video_codecs_decode;
+		return 0;
+	case CHIP_OLAND:
+		if (encode)
+			*codecs = &hainan_video_codecs_encode;
+		else
+			*codecs = &tahiti_video_codecs_decode;
+		return 0;
+	case CHIP_HAINAN:
+		if (encode)
+			*codecs = &hainan_video_codecs_encode;
+		else
+			*codecs = &hainan_video_codecs_decode;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
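The query_video_codecs hook added above is not called by SI code directly; common amdgpu code reaches it when userspace asks for video capabilities. A minimal sketch of the caller side, assuming the amdgpu_asic_query_video_codecs() wrapper macro from amdgpu.h (the macro name and the debug output are assumptions, not part of this diff):

	/* Hedged sketch: list the decode caps this hook reports. */
	const struct amdgpu_video_codecs *codecs;
	unsigned int i;

	if (!amdgpu_asic_query_video_codecs(adev, false /* decode */, &codecs)) {
		for (i = 0; i < codecs->codec_count; i++)
			DRM_DEBUG("codec %u: up to %ux%u\n",
				  codecs->codec_array[i].codec_type,
				  codecs->codec_array[i].max_width,
				  codecs->codec_array[i].max_height);
	}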
 static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags;
@@ -973,6 +1087,28 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
+static u32 si_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+	r = RREG32(mmUVD_CTX_DATA);
+	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+	return r;
+}
+
+static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+	WREG32(mmUVD_CTX_DATA, (v));
+	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+}
+
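These two accessors back the indexed UVD context register helpers used elsewhere in the driver; the wrapper macros are assumed to follow the usual amdgpu.h pattern (verify against your tree):

	/* Assumed wrapper shape (amdgpu.h): */
	#define RREG32_UVD_CTX(reg)	adev->uvd_ctx_rreg(adev, (reg))
	#define WREG32_UVD_CTX(reg, v)	adev->uvd_ctx_wreg(adev, (reg), (v))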
 static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
 	{GRBM_STATUS},
 	{mmGRBM_STATUS2},
@@ -1191,18 +1327,124 @@ static bool si_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
-//xxx: not implemented
-static int si_asic_reset(struct amdgpu_device *adev)
+static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
 {
-	return 0;
+	u32 tmp, i;
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_BYPASS_EN;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp |= SPLL_CTLREQ_CHG;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	tmp = RREG32(MPLL_CNTL_MODE);
+	tmp &= ~MPLL_MCLK_SEL;
+	WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp |= SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_RESET;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_SLEEP;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp &= ~SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+	u32 i;
+	int r = -EINVAL;
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+	/* set mclk/sclk to bypass */
+	si_set_clk_bypass_mode(adev);
+	/* powerdown spll */
+	si_spll_powerdown(adev);
+	/* disable BM */
+	pci_clear_master(adev->pdev);
+	/* reset */
+	amdgpu_device_pci_config_reset(adev);
+
+	udelay(100);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+			/* enable BM */
+			pci_set_master(adev->pdev);
+			adev->has_hw_reset = true;
+			r = 0;
+			break;
+		}
+		udelay(1);
+	}
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+	return r;
+}
+
+static bool si_asic_supports_baco(struct amdgpu_device *adev)
+{
+	return false;
 }
 
 static enum amd_reset_method
 si_asic_reset_method(struct amdgpu_device *adev)
 {
+	if (amdgpu_reset_method == AMD_RESET_METHOD_PCI)
+		return amdgpu_reset_method;
+	else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
+		 amdgpu_reset_method != -1)
+		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
+			 amdgpu_reset_method);
+
 	return AMD_RESET_METHOD_LEGACY;
 }
 
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+	int r;
+
+	switch (si_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		r = amdgpu_device_pci_reset(adev);
+		break;
+	default:
+		dev_info(adev->dev, "PCI CONFIG reset\n");
+		r = si_gpu_pci_config_reset(adev);
+		break;
+	}
+
+	return r;
+}
+
 static u32 si_get_config_memsize(struct amdgpu_device *adev)
 {
 	return RREG32(mmCONFIG_MEMSIZE);
@@ -1213,7 +1455,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
 	uint32_t temp;
 
 	temp = RREG32(CONFIG_CNTL);
-	if (state == false) {
+	if (!state) {
 		temp &= ~(1<<0);
 		temp |= (1<<1);
 	} else {
@@ -1224,7 +1466,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
 
 static u32 si_get_xclk(struct amdgpu_device *adev)
 {
-	u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 reference_clock = adev->clock.spll.reference_freq;
 	u32 tmp;
 
 	tmp = RREG32(CG_CLKPIN_CNTL_2);
@@ -1238,18 +1480,6 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
 	return reference_clock;
 }
 
-//xxx:not implemented
-static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
-{
-	return 0;
-}
-
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-	if (is_virtual_machine()) /* passthrough mode */
-		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
 	if (!ring || !ring->funcs->emit_wreg) {
@@ -1405,6 +1635,362 @@ static uint64_t si_get_pcie_replay_count(struct amdgpu_device *adev)
 	return (nak_r + nak_g);
 }
 
+static int si_uvd_send_upll_ctlreq(struct amdgpu_device *adev,
+				   unsigned cg_upll_func_cntl)
+{
+	unsigned i;
+
+	/* Make sure UPLL_CTLREQ is deasserted */
+	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* Assert UPLL_CTLREQ */
+	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* Wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+
+		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* Deassert UPLL_CTLREQ */
+	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
+		DRM_ERROR("Timeout setting UVD clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static unsigned si_uvd_calc_upll_post_div(unsigned vco_freq,
+					  unsigned target_freq,
+					  unsigned pd_min,
+					  unsigned pd_even)
+{
+	unsigned post_div = vco_freq / target_freq;
+
+	/* Adjust to post divider minimum value */
+	if (post_div < pd_min)
+		post_div = pd_min;
+
+	/* We always need a frequency less than or equal to the target */
+	if ((vco_freq / post_div) > target_freq)
+		post_div += 1;
+
+	/* Post dividers above a certain value must be even */
+	if (post_div > pd_even && post_div % 2)
+		post_div += 1;
+
+	return post_div;
+}
+
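To see the three post-divider rules in action, here is a small standalone check of the same logic with sample frequencies in kHz (illustrative test harness only, not part of the diff):

	#include <assert.h>

	/* Mirror of si_uvd_calc_upll_post_div() above, for a quick sanity check. */
	static unsigned post_div(unsigned vco, unsigned target,
				 unsigned pd_min, unsigned pd_even)
	{
		unsigned pd = vco / target;

		if (pd < pd_min)
			pd = pd_min;
		if ((vco / pd) > target)
			pd += 1;
		if (pd > pd_even && pd % 2)
			pd += 1;
		return pd;
	}

	int main(void)
	{
		/* 125000 / 54000 = 2, but 125000 / 2 > 54000, so bump to 3. */
		assert(post_div(125000, 54000, 0, 5) == 3);
		/* 250000 / 38000 = 6 -> 7 (still too fast) -> 8 (must be even above 5). */
		assert(post_div(250000, 38000, 0, 5) == 8);
		return 0;
	}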
+/**
+ * si_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @adev: amdgpu_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (except APUs).
+ * Returns zero on success; -EINVAL on error.
+ */
+static int si_calc_upll_dividers(struct amdgpu_device *adev,
+				 unsigned vclk, unsigned dclk,
+				 unsigned vco_min, unsigned vco_max,
+				 unsigned fb_factor, unsigned fb_mask,
+				 unsigned pd_min, unsigned pd_max,
+				 unsigned pd_even,
+				 unsigned *optimal_fb_div,
+				 unsigned *optimal_vclk_div,
+				 unsigned *optimal_dclk_div)
+{
+	unsigned vco_freq, ref_freq = adev->clock.spll.reference_freq;
+
+	/* Start off with something large */
+	unsigned optimal_score = ~0;
+
+	/* Loop through vco from low to high */
+	vco_min = max(max(vco_min, vclk), dclk);
+	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+		unsigned vclk_div, dclk_div, score;
+
+		do_div(fb_div, ref_freq);
+
+		/* fb div out of range ? */
+		if (fb_div > fb_mask)
+			break; /* It can only get worse */
+
+		fb_div &= fb_mask;
+
+		/* Calc vclk divider with current vco freq */
+		vclk_div = si_uvd_calc_upll_post_div(vco_freq, vclk,
+						     pd_min, pd_even);
+		if (vclk_div > pd_max)
+			break; /* vco is too big, it has to stop */
+
+		/* Calc dclk divider with current vco freq */
+		dclk_div = si_uvd_calc_upll_post_div(vco_freq, dclk,
+						     pd_min, pd_even);
+		if (dclk_div > pd_max)
+			break; /* vco is too big, it has to stop */
+
+		/* Calc score with current vco freq */
+		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+		/* Determine if this vco setting is better than current optimal settings */
+		if (score < optimal_score) {
+			*optimal_fb_div = fb_div;
+			*optimal_vclk_div = vclk_div;
+			*optimal_dclk_div = dclk_div;
+			optimal_score = score;
+			if (optimal_score == 0)
+				break; /* It can't get better than this */
+		}
+	}
+
+	/* Did we find a valid setup ? */
+	if (optimal_score == ~0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* Bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* Put PLL in bypass mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+	if (!vclk || !dclk) {
+		/* Keep the Bypass mode */
+		return 0;
+	}
+
+	r = si_calc_upll_dividers(adev, vclk, dclk, 125000, 250000,
+				  16384, 0x03FFFFFF, 0, 128, 5,
+				  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	/* Set RESET_ANTI_MUX to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* Set VCO_MODE to 1 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+	/* Disable sleep mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+	/* Deassert UPLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* Assert UPLL_RESET again */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* Disable spread spectrum. */
+	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* Set feedback divider */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+	/* Set ref divider to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+	if (fb_div < 307200)
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+	else
+		WREG32_P(CG_UPLL_FUNC_CNTL_4,
+			 UPLL_SPARE_ISPARE9,
+			 ~UPLL_SPARE_ISPARE9);
+
+	/* Set PDIV_A and PDIV_B */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+	/* Give the PLL some time to settle */
+	mdelay(15);
+
+	/* Deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* Switch from bypass mode to normal mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* Switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
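All of the masked updates in si_set_uvd_clocks() go through WREG32_P, amdgpu's read-modify-write MMIO helper. Its assumed shape, paraphrased from amdgpu.h (the bits in mask are kept, the rest are replaced by val; verify against your tree):

	#define WREG32_P(reg, val, mask)				\
		do {							\
			uint32_t tmp_ = RREG32(reg);			\
			tmp_ &= (mask);					\
			tmp_ |= ((val) & ~(mask));			\
			WREG32(reg, tmp_);				\
		} while (0)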
+static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Make sure VCEPLL_CTLREQ is deasserted */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* Assert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* Wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+
+		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* Deassert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
+		DRM_ERROR("Timeout setting VCE clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
+	int r;
+
+	/* Bypass evclk and ecclk with bclk */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	/* Put PLL in bypass mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
+		     ~VCEPLL_BYPASS_EN_MASK);
+
+	if (!evclk || !ecclk) {
+		/* Keep the Bypass mode, put PLL to sleep */
+		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+			     ~VCEPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = si_calc_upll_dividers(adev, evclk, ecclk, 125000, 250000,
+				  16384, 0x03FFFFFF, 0, 128, 5,
+				  &fb_div, &evclk_div, &ecclk_div);
+	if (r)
+		return r;
+
+	/* Set RESET_ANTI_MUX to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* Set VCO_MODE to 1 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
+		     ~VCEPLL_VCO_MODE_MASK);
+
+	/* Toggle VCEPLL_SLEEP to 1 then back to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+		     ~VCEPLL_SLEEP_MASK);
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
+
+	/* Deassert VCEPLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = si_vce_send_vcepll_ctlreq(adev);
+	if (r)
+		return r;
+
+	/* Assert VCEPLL_RESET again */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
+
+	/* Disable spread spectrum. */
+	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* Set feedback divider */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3,
+		     VCEPLL_FB_DIV(fb_div),
+		     ~VCEPLL_FB_DIV_MASK);
+
+	/* Set ref divider to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
+
+	/* Set PDIV_A and PDIV_B */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
+		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
+
+	/* Give the PLL some time to settle */
+	mdelay(15);
+
+	/* Deassert PLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* Switch from bypass mode to normal mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
+
+	r = si_vce_send_vcepll_ctlreq(adev);
+	if (r)
+		return r;
+
+	/* Switch EVCLK and ECCLK selection */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
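The VCEPLL registers are not directly memory-mapped: they sit behind the SMC index/data pair, which is why the VCE path uses RREG32_SMC/WREG32_SMC_P while the UVD path uses plain MMIO. A sketch of the underlying indexed read, modeled on the existing SI SMC accessors (assumed, abbreviated):

	static u32 si_smc_rreg_sketch(struct amdgpu_device *adev, u32 reg)
	{
		unsigned long flags;
		u32 r;

		spin_lock_irqsave(&adev->smc_idx_lock, flags);
		WREG32(SMC_IND_INDEX_0, (reg));	/* select the SMC register */
		r = RREG32(SMC_IND_DATA_0);	/* read through the window */
		spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
		return r;
	}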
+static void si_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
@@ -1415,7 +2001,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.set_vga_state = &si_vga_set_state,
 	.get_xclk = &si_get_xclk,
 	.set_uvd_clocks = &si_set_uvd_clocks,
-	.set_vce_clocks = NULL,
+	.set_vce_clocks = &si_set_vce_clocks,
 	.get_pcie_lanes = &si_get_pcie_lanes,
 	.set_pcie_lanes = &si_set_pcie_lanes,
 	.get_config_memsize = &si_get_config_memsize,
@@ -1425,6 +2011,9 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.get_pcie_usage = &si_get_pcie_usage,
 	.need_reset_on_init = &si_need_reset_on_init,
 	.get_pcie_replay_count = &si_get_pcie_replay_count,
+	.supports_baco = &si_asic_supports_baco,
+	.pre_asic_init = &si_pre_asic_init,
+	.query_video_codecs = &si_query_video_codecs,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1443,8 +2032,8 @@ static int si_common_early_init(void *handle)
 	adev->pcie_wreg = &si_pcie_wreg;
 	adev->pciep_rreg = &si_pciep_rreg;
 	adev->pciep_wreg = &si_pciep_wreg;
-	adev->uvd_ctx_rreg = NULL;
-	adev->uvd_ctx_wreg = NULL;
+	adev->uvd_ctx_rreg = si_uvd_ctx_rreg;
+	adev->uvd_ctx_wreg = si_uvd_ctx_wreg;
 	adev->didt_rreg = NULL;
 	adev->didt_wreg = NULL;
 
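The hooks installed in si_asic_funcs above are reached through thin wrapper macros in amdgpu.h; for the clock setters the assumed shape is (verify against your tree):

	/* Assumed wrapper shape (amdgpu.h): */
	#define amdgpu_asic_set_uvd_clocks(adev, vclk, dclk) \
		(adev)->asic_funcs->set_uvd_clocks((adev), (vclk), (dclk))
	#define amdgpu_asic_set_vce_clocks(adev, ev, ec) \
		(adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))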
@@ -2159,8 +2748,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
 {
-	si_detect_hw_virtualization(adev);
-
 	switch (adev->asic_type) {
 	case CHIP_VERDE:
 	case CHIP_TAHITI:
@@ -2172,10 +2759,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
-		/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
 		break;
 	case CHIP_OLAND:
@@ -2186,11 +2777,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
-
-		/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
 		break;
 	case CHIP_HAINAN:
@@ -2201,7 +2795,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 		break;
 	default:
 		BUG();
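For context on the new CONFIG_DRM_AMD_DC_SI branches: the compile-time guard only admits the code, while amdgpu_device_has_dc_support() decides at runtime. Its per-ASIC core is assumed to look roughly like the following (a paraphrase of amdgpu_device_asic_has_dc_support() in amdgpu_device.c, not part of this diff; DC on SI is additionally opt-in via the amdgpu.dc=1 module parameter):

	bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
	{
		switch (asic_type) {
	#if defined(CONFIG_DRM_AMD_DC_SI)
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
			/* DC on SI is opt-in via the amdgpu.dc module parameter */
			return amdgpu_dc > 0;
	#endif
		default:
			return false;
		}
	}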
