Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/soc15.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/soc15.c | 278
1 file changed, 141 insertions, 137 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4e67fe1e7955..a04a033f57de 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -34,18 +34,17 @@
 #include "atom.h"
 #include "amd_pcie.h"

-#include "vega10/soc15ip.h"
-#include "vega10/UVD/uvd_7_0_offset.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/SDMA0/sdma0_4_0_offset.h"
-#include "vega10/SDMA1/sdma1_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_sh_mask.h"
-#include "vega10/MP/mp_9_0_offset.h"
-#include "vega10/MP/mp_9_0_sh_mask.h"
-#include "vega10/SMUIO/smuio_9_0_offset.h"
-#include "vega10/SMUIO/smuio_9_0_sh_mask.h"
+#include "uvd/uvd_7_0_offset.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
+#include "mp/mp_9_0_sh_mask.h"
+#include "smuio/smuio_9_0_offset.h"
+#include "smuio/smuio_9_0_sh_mask.h"

 #include "soc15.h"
 #include "soc15_common.h"
@@ -101,15 +100,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags, address, data;
 	u32 r;
-	const struct nbio_pcie_index_data *nbio_pcie_id;
-
-	if (adev->flags & AMD_IS_APU)
-		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
-	else
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
-	address = nbio_pcie_id->index_offset;
-	data = nbio_pcie_id->data_offset;
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);

 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -122,15 +114,9 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
 	unsigned long flags, address, data;
-	const struct nbio_pcie_index_data *nbio_pcie_id;

-	if (adev->flags & AMD_IS_APU)
-		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
-	else
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
-	address = nbio_pcie_id->index_offset;
-	data = nbio_pcie_id->data_offset;
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);

 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -242,41 +228,9 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMD_IS_APU)
-		return nbio_v7_0_get_memsize(adev);
-	else
-		return nbio_v6_1_get_memsize(adev);
+	return adev->nbio_funcs->get_memsize(adev);
 }

-static const u32 vega10_golden_init[] =
-{
-};
-
-static const u32 raven_golden_init[] =
-{
-};
-
-static void soc15_init_golden_registers(struct amdgpu_device *adev)
-{
-	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
-	mutex_lock(&adev->grbm_idx_mutex);
-
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-		amdgpu_program_register_sequence(adev,
-						 vega10_golden_init,
-						 (const u32)ARRAY_SIZE(vega10_golden_init));
-		break;
-	case CHIP_RAVEN:
-		amdgpu_program_register_sequence(adev,
-						 raven_golden_init,
-						 (const u32)ARRAY_SIZE(raven_golden_init));
-		break;
-	default:
-		break;
-	}
-	mutex_unlock(&adev->grbm_idx_mutex);
-}
 static u32 soc15_get_xclk(struct amdgpu_device *adev)
 {
 	return adev->clock.spll.reference_freq;
 }
@@ -332,25 +286,34 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }

-static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
-	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
-	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
+struct soc15_allowed_register_entry {
+	uint32_t hwip;
+	uint32_t inst;
+	uint32_t seg;
+	uint32_t reg_offset;
+	bool grbm_indexed;
+};
+
+
+static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
+	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
+	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
 };

 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -377,12 +340,9 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
 	if (indexed) {
 		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
 	} else {
-		switch (reg_offset) {
-		case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
+		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
 			return adev->gfx.config.gb_addr_config;
-		default:
-			return RREG32(reg_offset);
-		}
+		return RREG32(reg_offset);
 	}
 }

@@ -390,10 +350,13 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 			    u32 sh_num, u32 reg_offset, u32 *value)
 {
 	uint32_t i;
+	struct soc15_allowed_register_entry *en;

 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
-		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
+		en = &soc15_allowed_read_registers[i];
+		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+					+ en->reg_offset))
 			continue;

 		*value = soc15_get_register_value(adev,
@@ -404,6 +367,43 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 	return -EINVAL;
 }

+
+/**
+ * soc15_program_register_sequence - program an array of registers.
+ *
+ * @adev: amdgpu_device pointer
+ * @regs: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array or registers with and and or masks.
+ * This is a helper for setting golden registers.
+ */
+
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+					     const struct soc15_reg_golden *regs,
+					     const u32 array_size)
+{
+	const struct soc15_reg_golden *entry;
+	u32 tmp, reg;
+	int i;
+
+	for (i = 0; i < array_size; ++i) {
+		entry = &regs[i];
+		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+
+		if (entry->and_mask == 0xffffffff) {
+			tmp = entry->or_mask;
+		} else {
+			tmp = RREG32(reg);
+			tmp &= ~(entry->and_mask);
+			tmp |= entry->or_mask;
+		}
+		WREG32(reg, tmp);
+	}
+
+}
+
+
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
 	u32 i;
@@ -428,9 +428,8 @@ static int soc15_asic_reset(struct amdgpu_device *adev)

 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		u32 memsize = (adev->flags & AMD_IS_APU) ?
-			nbio_v7_0_get_memsize(adev) :
-			nbio_v6_1_get_memsize(adev);
+		u32 memsize = adev->nbio_funcs->get_memsize(adev);
+
 		if (memsize != 0xffffffff)
 			break;
 		udelay(1);
@@ -495,14 +494,10 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 }

 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
-					bool enable)
+					   bool enable)
 {
-	if (adev->flags & AMD_IS_APU) {
-		nbio_v7_0_enable_doorbell_aperture(adev, enable);
-	} else {
-		nbio_v6_1_enable_doorbell_aperture(adev, enable);
-		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
-	}
+	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
 }

 static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -516,50 +511,65 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =

 int soc15_set_ip_blocks(struct amdgpu_device *adev)
 {
-	nbio_v6_1_detect_hw_virt(adev);
+	/* Set IP register base before any HW register access */
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		vega10_reg_base_init(adev);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (adev->flags & AMD_IS_APU)
+		adev->nbio_funcs = &nbio_v7_0_funcs;
+	else
+		adev->nbio_funcs = &nbio_v6_1_funcs;
+
+	adev->nbio_funcs->detect_hw_virt(adev);

 	if (amdgpu_sriov_vf(adev))
 		adev->virt.ops = &xgpu_ai_virt_ops;

 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
-		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
-		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
-		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
-			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 		if (!amdgpu_sriov_vf(adev))
-			amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+			amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_ip_block_add(adev, &dm_ip_block);
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #else
 #	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
 #endif
-		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
-		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
 		break;
 	case CHIP_RAVEN:
-		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
-		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
-		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
-		amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
-		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_ip_block_add(adev, &dm_ip_block);
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #else
 #	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
 #endif
-		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
-		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
-		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
@@ -570,10 +580,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)

 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMD_IS_APU)
-		return nbio_v7_0_get_rev_id(adev);
-	else
-		return nbio_v6_1_get_rev_id(adev);
+	return adev->nbio_funcs->get_rev_id(adev);
 }

 static const struct amdgpu_asic_funcs soc15_asic_funcs =
@@ -609,8 +616,8 @@ static int soc15_common_early_init(void *handle)

 	adev->asic_funcs = &soc15_asic_funcs;

-	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
-	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
+	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
+	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
 		psp_enabled = true;

 	adev->rev_id = soc15_get_rev_id(adev);
@@ -659,8 +666,8 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_MC_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS;
-		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
-			AMD_PG_SUPPORT_MMHUB;
+		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+
 		adev->external_rev_id = 0x1;
 		break;
 	default:
@@ -675,7 +682,7 @@ static int soc15_common_early_init(void *handle)
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
 					amdgpu_fw_load_type);

-	amdgpu_get_pcie_info(adev);
+	amdgpu_device_get_pcie_info(adev);

 	return 0;
 }
@@ -709,15 +716,12 @@ static int soc15_common_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	/* move the golden regs per IP block */
-	soc15_init_golden_registers(adev);
 	/* enable pcie gen2/3 link */
 	soc15_pcie_gen3_enable(adev);
 	/* enable aspm */
 	soc15_program_aspm(adev);
 	/* setup nbio registers */
-	if (!(adev->flags & AMD_IS_APU))
-		nbio_v6_1_init_registers(adev);
+	adev->nbio_funcs->init_registers(adev);
 	/* enable the doorbell aperture */
 	soc15_enable_doorbell_aperture(adev, true);
@@ -878,9 +882,9 @@ static int soc15_common_set_clockgating_state(void *handle,

 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
-		nbio_v6_1_update_medium_grain_clock_gating(adev,
+		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		nbio_v6_1_update_medium_grain_light_sleep(adev,
+		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		soc15_update_hdp_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
@@ -894,9 +898,9 @@ static int soc15_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
 	case CHIP_RAVEN:
-		nbio_v7_0_update_medium_grain_clock_gating(adev,
+		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		nbio_v6_1_update_medium_grain_light_sleep(adev,
+		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		soc15_update_hdp_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
@@ -921,7 +925,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 	if (amdgpu_sriov_vf(adev))
 		*flags = 0;

-	nbio_v6_1_get_clockgating_state(adev, flags);
+	adev->nbio_funcs->get_clockgating_state(adev, flags);

 	/* AMD_CG_SUPPORT_HDP_LS */
 	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
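
The recurring pattern in the hunks above is the removal of per-call-site APU/dGPU branching: every `if (adev->flags & AMD_IS_APU)` choice between the `nbio_v7_0_*` and `nbio_v6_1_*` helpers becomes a call through `adev->nbio_funcs`, a function-pointer table that `soc15_set_ip_blocks()` now selects exactly once. The standalone sketch below is not kernel code; all names and return values are invented for illustration, and only two of the callbacks that appear in this diff are modeled. It shows the select-once/dispatch-everywhere structure the patch introduces.

/*
 * Standalone sketch (illustration only, not the real struct
 * amdgpu_nbio_funcs): pick one table of function pointers at init
 * time, then dispatch through it instead of re-testing AMD_IS_APU
 * at every call site.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_device;

struct demo_nbio_funcs {
	uint32_t (*get_memsize)(struct demo_device *dev);
	void     (*enable_doorbell_aperture)(struct demo_device *dev, bool enable);
};

struct demo_device {
	bool is_apu;
	const struct demo_nbio_funcs *nbio_funcs;
};

/* dGPU backend, standing in for nbio_v6_1_funcs (values invented) */
static uint32_t dgpu_get_memsize(struct demo_device *dev) { (void)dev; return 8192; }
static void dgpu_doorbell(struct demo_device *dev, bool en)
{
	(void)dev;
	printf("dGPU doorbell aperture %s\n", en ? "on" : "off");
}
static const struct demo_nbio_funcs dgpu_nbio_funcs = {
	.get_memsize = dgpu_get_memsize,
	.enable_doorbell_aperture = dgpu_doorbell,
};

/* APU backend, standing in for nbio_v7_0_funcs (values invented) */
static uint32_t apu_get_memsize(struct demo_device *dev) { (void)dev; return 1024; }
static void apu_doorbell(struct demo_device *dev, bool en)
{
	(void)dev;
	printf("APU doorbell aperture %s\n", en ? "on" : "off");
}
static const struct demo_nbio_funcs apu_nbio_funcs = {
	.get_memsize = apu_get_memsize,
	.enable_doorbell_aperture = apu_doorbell,
};

int main(void)
{
	struct demo_device dev = { .is_apu = true };

	/* chosen once, as soc15_set_ip_blocks() now does for adev->nbio_funcs */
	dev.nbio_funcs = dev.is_apu ? &apu_nbio_funcs : &dgpu_nbio_funcs;

	/* later call sites dispatch without re-checking the flag */
	printf("memsize: %u MB\n", (unsigned int)dev.nbio_funcs->get_memsize(&dev));
	dev.nbio_funcs->enable_doorbell_aperture(&dev, true);
	return 0;
}

The real table used by the patch carries many more callbacks (PCIe index/data offsets, rev id, clockgating, hw-virt detection, register init), as the converted call sites above show, but the dispatch structure is the same.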
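In place of the removed `soc15_init_golden_registers()` (which programmed the empty `vega10_golden_init`/`raven_golden_init` arrays), the patch exports `soc15_program_register_sequence()`: each `struct soc15_reg_golden` entry is resolved through `adev->reg_offset[hwip][instance][segment]` and applied as a read-modify-write that clears the bits in `and_mask` and ORs in `or_mask`, with an `and_mask` of `0xffffffff` short-circuiting to a plain write of `or_mask`. The snippet below is a minimal standalone model of that masking rule only, with example values chosen for illustration.

/* Standalone model of the masking rule in soc15_program_register_sequence():
 * bits set in and_mask are cleared from the current value, then or_mask is
 * OR'd in; an and_mask of 0xffffffff means "ignore the old value entirely".
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t apply_golden(uint32_t current, uint32_t and_mask, uint32_t or_mask)
{
	if (and_mask == 0xffffffff)
		return or_mask;                      /* write or_mask verbatim */
	return (current & ~and_mask) | or_mask;      /* read-modify-write */
}

int main(void)
{
	/* clear bits 7:4 of a register value and force bit 0 on */
	printf("0x%08" PRIx32 "\n", apply_golden(0x000000f0, 0x000000f0, 0x00000001));
	/* full-mask entry: old value is ignored */
	printf("0x%08" PRIx32 "\n", apply_golden(0xdeadbeef, 0xffffffff, 0x12345678));
	return 0;
}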
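The same indirection shows up in the allowed-read-register table: `SOC15_REG_ENTRY()` now records `(hwip, inst, seg, reg_offset)` instead of a pre-baked absolute offset, and `soc15_read_register()` reconstructs the absolute index as `adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset`, with the per-ASIC base table filled by `vega10_reg_base_init()`. The toy model below mirrors only that indexing scheme; the IP names and base values are invented for illustration.

/* Toy model of the adev->reg_offset[hwip][instance][segment] lookup the
 * patch switches to.  Bases here are made up; only the lookup shape
 * matches the diff above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { HWIP_GC, HWIP_HDP, HWIP_MAX };
#define MAX_INST 1
#define MAX_SEG  2

/* filled per ASIC, as vega10_reg_base_init() does for the real table */
static const uint32_t reg_base[HWIP_MAX][MAX_INST][MAX_SEG] = {
	[HWIP_GC]  = { { 0x00002000, 0x0000a000 } },
	[HWIP_HDP] = { { 0x00000f20, 0x00001000 } },
};

static uint32_t soc15_reg(uint32_t hwip, uint32_t inst, uint32_t seg, uint32_t offset)
{
	return reg_base[hwip][inst][seg] + offset;
}

int main(void)
{
	/* e.g. a GC register at relative offset 0x44 in segment 0 */
	printf("0x%08" PRIx32 "\n", soc15_reg(HWIP_GC, 0, 0, 0x44));
	return 0;
}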