Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 242
1 file changed, 178 insertions(+), 64 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 0fda70336300..ecdc027f8220 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -87,7 +87,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
 static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
 		enum amd_powergating_state state);
 static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
 		int inst_idx, struct dpg_pause_state *new_state);
@@ -95,16 +95,23 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
 static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
 static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
 				  int inst_idx, bool indirect);
+
+static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
+{
+	return (amdgpu_sriov_vf(adev) ||
+		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)));
+}
+
 /**
  * vcn_v4_0_3_early_init - set function pointers
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Set ring and irq function pointers
  */
-static int vcn_v4_0_3_early_init(void *handle)
+static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	/* re-use enc ring as unified ring */
 	adev->vcn.num_enc_rings = 1;
@@ -116,16 +123,30 @@ static int vcn_v4_0_3_early_init(void *handle)
 	return amdgpu_vcn_early_init(adev);
 }
 
+static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+	struct amdgpu_vcn4_fw_shared *fw_shared;
+
+	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+	fw_shared->sq.is_enabled = 1;
+
+	if (amdgpu_vcnfw_log)
+		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+	return 0;
+}
+
 /**
  * vcn_v4_0_3_sw_init - sw init for VCN block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Load firmware and sw initialization
  */
-static int vcn_v4_0_3_sw_init(void *handle)
+static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	struct amdgpu_ring *ring;
 	int i, r, vcn_inst;
 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
@@ -148,8 +169,6 @@ static int vcn_v4_0_3_sw_init(void *handle)
 		return r;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
 		vcn_inst = GET_INST(VCN, i);
 
 		ring = &adev->vcn.inst[i].ring_enc[0];
@@ -172,14 +191,13 @@ static int vcn_v4_0_3_sw_init(void *handle)
 		if (r)
 			return r;
 
-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
-		fw_shared->sq.is_enabled = true;
-
-		if (amdgpu_vcnfw_log)
-			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+		vcn_v4_0_3_fw_shared_init(adev, i);
 	}
 
+	/* TODO: Add queue reset mask when FW fully supports it */
+	adev->vcn.supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
 	if (amdgpu_sriov_vf(adev)) {
 		r = amdgpu_virt_alloc_mm_table(adev);
 		if (r)
@@ -206,19 +224,23 @@ static int vcn_v4_0_3_sw_init(void *handle)
 		adev->vcn.ip_dump = ptr;
 	}
 
+	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+	if (r)
+		return r;
+
 	return 0;
 }
 
 /**
  * vcn_v4_0_3_sw_fini - sw fini for VCN block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * VCN suspend and free up sw allocation
  */
-static int vcn_v4_0_3_sw_fini(void *handle)
+static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, r, idx;
 
 	if (drm_dev_enter(&adev->ddev, &idx)) {
@@ -239,6 +261,7 @@ static int vcn_v4_0_3_sw_fini(void *handle)
 	if (r)
 		return r;
 
+	amdgpu_vcn_sysfs_reset_mask_fini(adev);
 	r = amdgpu_vcn_sw_fini(adev);
 
 	kfree(adev->vcn.ip_dump);
@@ -249,13 +272,13 @@
 /**
  * vcn_v4_0_3_hw_init - start and test VCN block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Initialize the hardware, boot up the VCPU and do some testing
  */
-static int vcn_v4_0_3_hw_init(void *handle)
+static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	struct amdgpu_ring *ring;
 	int i, r, vcn_inst;
 
@@ -273,6 +296,8 @@ static int vcn_v4_0_3_hw_init(void *handle)
 		}
 	} else {
 		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+			struct amdgpu_vcn4_fw_shared *fw_shared;
+
 			vcn_inst = GET_INST(VCN, i);
 			ring = &adev->vcn.inst[i].ring_enc[0];
 
@@ -296,6 +321,11 @@
 					regVCN_RB1_DB_CTRL);
 			}
 
+			/* Re-init fw_shared when RAS fatal error occurred */
+			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+			if (!fw_shared->sq.is_enabled)
+				vcn_v4_0_3_fw_shared_init(adev, i);
+
 			r = amdgpu_ring_test_helper(ring);
 			if (r)
 				return r;
@@ -308,18 +338,18 @@
 /**
  * vcn_v4_0_3_hw_fini - stop the hardware block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Stop the VCN block, mark ring as not ready any more
  */
-static int vcn_v4_0_3_hw_fini(void *handle)
+static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	cancel_delayed_work_sync(&adev->vcn.idle_work);
 
 	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
-		vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+		vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
 
 	return 0;
 }
@@ -327,20 +357,19 @@
 /**
  * vcn_v4_0_3_suspend - suspend VCN block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * HW fini and suspend VCN block
  */
-static int vcn_v4_0_3_suspend(void *handle)
+static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	r = vcn_v4_0_3_hw_fini(adev);
+	r = vcn_v4_0_3_hw_fini(ip_block);
 	if (r)
 		return r;
 
-	r = amdgpu_vcn_suspend(adev);
+	r = amdgpu_vcn_suspend(ip_block->adev);
 
 	return r;
 }
@@ -348,20 +377,19 @@
 /**
  * vcn_v4_0_3_resume - resume VCN block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Resume firmware and hw init VCN block
  */
-static int vcn_v4_0_3_resume(void *handle)
+static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	r = amdgpu_vcn_resume(adev);
+	r = amdgpu_vcn_resume(ip_block->adev);
 	if (r)
 		return r;
 
-	r = vcn_v4_0_3_hw_init(adev);
+	r = vcn_v4_0_3_hw_init(ip_block);
 
 	return r;
 }
@@ -379,7 +407,7 @@
 	uint32_t offset, size, vcn_inst;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	vcn_inst = GET_INST(VCN, inst_idx);
@@ -454,7 +482,7 @@
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -929,6 +957,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		vcn_inst = GET_INST(VCN, i);
 
+		vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
+
 		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
 		header.version = MMSCH_VERSION;
 		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
@@ -941,7 +971,7 @@
 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
 
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
@@ -1093,8 +1123,10 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev)
 	int i, j, k, r, vcn_inst;
 	uint32_t tmp;
 
-	if (adev->pm.dpm_enabled)
-		amdgpu_dpm_enable_uvd(adev, true);
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_vcn(adev, true, i);
+	}
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1367,8 +1399,10 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
 		vcn_v4_0_3_enable_clock_gating(adev, i);
 	}
 Done:
-	if (adev->pm.dpm_enabled)
-		amdgpu_dpm_enable_uvd(adev, false);
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_vcn(adev, false, i);
+	}
 
 	return 0;
 }
@@ -1430,8 +1464,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
 static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 					      uint32_t val, uint32_t mask)
 {
-	/* For VF, only local offsets should be used */
-	if (amdgpu_sriov_vf(ring->adev))
+	/* Use normalized offsets when required */
+	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
 		reg = NORMALIZE_VCN_REG_OFFSET(reg);
 
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
@@ -1442,8 +1476,8 @@
 static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
 					  uint32_t val)
 {
-	/* For VF, only local offsets should be used */
-	if (amdgpu_sriov_vf(ring->adev))
+	/* Use normalized offsets when required */
+	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
 		reg = NORMALIZE_VCN_REG_OFFSET(reg);
 
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
@@ -1567,13 +1601,13 @@ static bool vcn_v4_0_3_is_idle(void *handle)
 /**
  * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Wait for VCN block idle
  */
-static int vcn_v4_0_3_wait_for_idle(void *handle)
+static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, ret = 0;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1588,15 +1622,15 @@
 
 /* vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
  * @state: clock gating state
  *
 * Set VCN block clockgating state
 */
-static int vcn_v4_0_3_set_clockgating_state(void *handle,
+static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 					  enum amd_clockgating_state state)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	bool enable = state == AMD_CG_STATE_GATE;
 	int i;
 
@@ -1616,15 +1650,15 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle,
 
 /**
  * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
 					  enum amd_powergating_state state)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int ret;
 
 	/* for SRIOV, guest should not control VCN Power-gating
@@ -1733,9 +1767,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
 	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
 }
 
-static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, j;
 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
 	uint32_t inst_off, is_powered;
@@ -1765,9 +1799,9 @@
 	}
 }
 
-static void vcn_v4_0_3_dump_ip_state(void *handle)
+static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, j;
 	bool is_powered;
 	uint32_t inst_off, inst_id;
@@ -1798,7 +1832,6 @@
 static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
 	.name = "vcn_v4_0_3",
 	.early_init = vcn_v4_0_3_early_init,
-	.late_init = NULL,
 	.sw_init = vcn_v4_0_3_sw_init,
 	.sw_fini = vcn_v4_0_3_sw_fini,
 	.hw_init = vcn_v4_0_3_hw_init,
@@ -1807,10 +1840,6 @@
 	.resume = vcn_v4_0_3_resume,
 	.is_idle = vcn_v4_0_3_is_idle,
 	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
-	.check_soft_reset = NULL,
-	.pre_soft_reset = NULL,
-	.soft_reset = NULL,
-	.post_soft_reset = NULL,
 	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
 	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
 	.dump_ip_state = vcn_v4_0_3_dump_ip_state,
@@ -1888,9 +1917,94 @@ static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
 	.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
 };
 
+static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+				      enum aca_smu_type type, void *data)
+{
+	struct aca_bank_info info;
+	u64 misc0;
+	int ret;
+
+	ret = aca_bank_info_decode(bank, &info);
+	if (ret)
+		return ret;
+
+	misc0 = bank->regs[ACA_REG_IDX_MISC0];
+	switch (type) {
+	case ACA_SMU_TYPE_UE:
+		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+						     1ULL);
+		break;
+	case ACA_SMU_TYPE_CE:
+		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+						     ACA_REG__MISC0__ERRCNT(misc0));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* reference to smu driver if header file */
+static int vcn_v4_0_3_err_codes[] = {
+	14, 15, /* VCN */
+};
+
+static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+					 enum aca_smu_type type, void *data)
+{
+	u32 instlo;
+
+	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+	instlo &= GENMASK(31, 1);
+
+	if (instlo != mmSMNAID_AID0_MCA_SMU)
+		return false;
+
+	if (aca_bank_check_error_codes(handle->adev, bank,
+				       vcn_v4_0_3_err_codes,
+				       ARRAY_SIZE(vcn_v4_0_3_err_codes)))
+		return false;
+
+	return true;
+}
+
+static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
+	.aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
+	.aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v4_0_3_aca_info = {
+	.hwip = ACA_HWIP_TYPE_SMU,
+	.mask = ACA_ERROR_UE_MASK,
+	.bank_ops = &vcn_v4_0_3_aca_bank_ops,
+};
+
+static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+	int r;
+
+	r = amdgpu_ras_block_late_init(adev, ras_block);
+	if (r)
+		return r;
+
+	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+				&vcn_v4_0_3_aca_info, NULL);
+	if (r)
+		goto late_fini;
+
+	return 0;
+
+late_fini:
+	amdgpu_ras_block_late_fini(adev, ras_block);
+
+	return r;
+}
+
 static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
 	.ras_block = {
 		.hw_ops = &vcn_v4_0_3_ras_hw_ops,
+		.ras_late_init = vcn_v4_0_3_ras_late_init,
 	},
 };
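
Note on the offset-normalization hunks above: the ring emit helpers previously normalized register offsets only when running as an SR-IOV VF, and the new vcn_v4_0_3_normalizn_reqd() helper extends the same normalization to bare-metal GC 9.4.4. The standalone C sketch below models only that predicate-gated pattern; every name in it (fake_device, NORMALIZE_REG_OFFSET, the mask value, the packed version encoding) is invented for illustration and is not the driver's actual API.

/* Minimal standalone model of the predicate-gated register-offset
 * normalization used by the VCN ring emit helpers above. Only the
 * control flow mirrors the driver; all names and values are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-device state the real helper consults. */
struct fake_device {
	bool is_sriov_vf;       /* running as an SR-IOV virtual function */
	uint32_t gc_ip_version; /* packed GC IP version, e.g. 0x090404 */
};

#define GC_IP_9_4_4 0x090404u

/* Mirrors vcn_v4_0_3_normalizn_reqd(): normalize for VFs and for
 * bare-metal GC 9.4.4. */
static inline bool normalizn_reqd(const struct fake_device *adev)
{
	return adev->is_sriov_vf || adev->gc_ip_version == GC_IP_9_4_4;
}

/* Toy normalization: the real NORMALIZE_VCN_REG_OFFSET() folds an
 * absolute offset into the per-instance aperture; here we just mask. */
#define NORMALIZE_REG_OFFSET(reg) ((reg) & 0x1FFFu)

static uint32_t emit_wreg(const struct fake_device *adev, uint32_t reg)
{
	if (normalizn_reqd(adev))
		reg = NORMALIZE_REG_OFFSET(reg);
	return reg; /* the driver would write this into the ring */
}

int main(void)
{
	struct fake_device bare  = { .is_sriov_vf = false, .gc_ip_version = 0x090403u };
	struct fake_device vf    = { .is_sriov_vf = true,  .gc_ip_version = 0x090403u };
	struct fake_device gc944 = { .is_sriov_vf = false, .gc_ip_version = GC_IP_9_4_4 };

	printf("bare-metal 9.4.3: 0x%x\n", (unsigned)emit_wreg(&bare, 0x4A123));  /* unchanged */
	printf("SR-IOV VF:        0x%x\n", (unsigned)emit_wreg(&vf, 0x4A123));    /* normalized */
	printf("bare-metal 9.4.4: 0x%x\n", (unsigned)emit_wreg(&gc944, 0x4A123)); /* normalized */
	return 0;
}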