Diffstat (limited to 'drivers/gpu')
191 files changed, 10244 insertions, 2594 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e7a010b7ca1f..468003583b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_JPEG] = 1, }; +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio) +{ + switch (ctx_prio) { + case AMDGPU_CTX_PRIORITY_UNSET: + case AMDGPU_CTX_PRIORITY_VERY_LOW: + case AMDGPU_CTX_PRIORITY_LOW: + case AMDGPU_CTX_PRIORITY_NORMAL: + case AMDGPU_CTX_PRIORITY_HIGH: + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return true; + default: + return false; + } +} + +static enum drm_sched_priority +amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) +{ + switch (ctx_prio) { + case AMDGPU_CTX_PRIORITY_UNSET: + return DRM_SCHED_PRIORITY_UNSET; + + case AMDGPU_CTX_PRIORITY_VERY_LOW: + return DRM_SCHED_PRIORITY_MIN; + + case AMDGPU_CTX_PRIORITY_LOW: + return DRM_SCHED_PRIORITY_MIN; + + case AMDGPU_CTX_PRIORITY_NORMAL: + return DRM_SCHED_PRIORITY_NORMAL; + + case AMDGPU_CTX_PRIORITY_HIGH: + return DRM_SCHED_PRIORITY_HIGH; + + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return DRM_SCHED_PRIORITY_HIGH; + + /* This should not happen as we sanitized userspace provided priority + * already, WARN if this happens. + */ + default: + WARN(1, "Invalid context priority %d\n", ctx_prio); + return DRM_SCHED_PRIORITY_NORMAL; + } + +} + static int amdgpu_ctx_priority_permit(struct drm_file *filp, - enum drm_sched_priority priority) + int32_t priority) { - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT) + if (!amdgpu_ctx_priority_is_valid(priority)) return -EINVAL; /* NORMAL and below are accessible by everyone */ - if (priority <= DRM_SCHED_PRIORITY_NORMAL) + if (priority <= AMDGPU_CTX_PRIORITY_NORMAL) return 0; if (capable(CAP_SYS_NICE)) @@ -62,26 +109,51 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } -static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) +static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio) { switch (prio) { - case DRM_SCHED_PRIORITY_HIGH: - case DRM_SCHED_PRIORITY_KERNEL: + case AMDGPU_CTX_PRIORITY_HIGH: + case AMDGPU_CTX_PRIORITY_VERY_HIGH: return AMDGPU_GFX_PIPE_PRIO_HIGH; default: return AMDGPU_GFX_PIPE_PRIO_NORMAL; } } -static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev, - enum drm_sched_priority prio, - u32 hw_ip) +static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio) { + switch (prio) { + case AMDGPU_CTX_PRIORITY_HIGH: + return AMDGPU_RING_PRIO_1; + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} + +static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip) +{ + struct amdgpu_device *adev = ctx->adev; + int32_t ctx_prio; unsigned int hw_prio; - hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ? - amdgpu_ctx_sched_prio_to_compute_prio(prio) : - AMDGPU_RING_PRIO_DEFAULT; + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? 
+ ctx->init_priority : ctx->override_priority; + + switch (hw_ip) { + case AMDGPU_HW_IP_COMPUTE: + hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio); + break; + case AMDGPU_HW_IP_VCE: + case AMDGPU_HW_IP_VCN_ENC: + hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio); + break; + default: + hw_prio = AMDGPU_RING_PRIO_DEFAULT; + break; + } + hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0) hw_prio = AMDGPU_RING_PRIO_DEFAULT; @@ -89,15 +161,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev, return hw_prio; } + static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, - const u32 ring) + const u32 ring) { struct amdgpu_device *adev = ctx->adev; struct amdgpu_ctx_entity *entity; struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; unsigned num_scheds = 0; + int32_t ctx_prio; unsigned int hw_prio; - enum drm_sched_priority priority; + enum drm_sched_priority drm_prio; int r; entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), @@ -105,10 +179,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, if (!entity) return -ENOMEM; + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? + ctx->init_priority : ctx->override_priority; entity->sequence = 1; - priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? - ctx->init_priority : ctx->override_priority; - hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip); + hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip); + drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio); hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); scheds = adev->gpu_sched[hw_ip][hw_prio].sched; @@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, num_scheds = 1; } - r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, + r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds, &ctx->guilty); if (r) goto error_free_entity; @@ -139,7 +214,7 @@ error_free_entity: } static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum drm_sched_priority priority, + int32_t priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { @@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->reset_counter_query = ctx->reset_counter; ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); ctx->init_priority = priority; - ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; + ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET; return 0; } @@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, - enum drm_sched_priority priority, + int32_t priority, uint32_t *id) { struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; @@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, { int r; uint32_t id; - enum drm_sched_priority priority; + int32_t priority; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; id = args->in.ctx_id; - r = amdgpu_to_sched_priority(args->in.priority, &priority); + priority = args->in.priority; /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (r == -EINVAL) - priority = DRM_SCHED_PRIORITY_NORMAL; + if (!amdgpu_ctx_priority_is_valid(priority)) + priority = AMDGPU_CTX_PRIORITY_NORMAL; switch (args->in.op) { case 
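To make the remapping above concrete, here is a stand-alone sketch (not driver code) of the two-level translation: a raw userspace int32 priority is validated first, then mapped separately to a software scheduler level and a hardware ring level. The numeric levels follow the AMDGPU_CTX_PRIORITY_* UAPI values as assumed from amdgpu_drm.h; the enum names and the demo main() are illustrative only.

/* Stand-alone sketch of the two-level priority translation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
	CTX_PRIO_UNSET = -2048, CTX_PRIO_VERY_LOW = -1023, CTX_PRIO_LOW = -512,
	CTX_PRIO_NORMAL = 0, CTX_PRIO_HIGH = 512, CTX_PRIO_VERY_HIGH = 1023,
};
enum sched_prio { SCHED_MIN, SCHED_NORMAL, SCHED_HIGH };	/* drm_sched level */
enum ring_prio  { RING_PRIO_0, RING_PRIO_1, RING_PRIO_2 };	/* hw queue level  */

static bool ctx_prio_is_valid(int32_t p)
{
	switch (p) {
	case CTX_PRIO_UNSET: case CTX_PRIO_VERY_LOW: case CTX_PRIO_LOW:
	case CTX_PRIO_NORMAL: case CTX_PRIO_HIGH: case CTX_PRIO_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

/* LOW/VERY_LOW collapse to one software slot, HIGH/VERY_HIGH to another... */
static enum sched_prio to_sched(int32_t p)
{
	if (p == CTX_PRIO_VERY_LOW || p == CTX_PRIO_LOW)
		return SCHED_MIN;
	if (p == CTX_PRIO_HIGH || p == CTX_PRIO_VERY_HIGH)
		return SCHED_HIGH;
	return SCHED_NORMAL;
}

/* ...while the hardware ring level still tells HIGH and VERY_HIGH apart. */
static enum ring_prio to_ring(int32_t p)
{
	if (p == CTX_PRIO_HIGH)
		return RING_PRIO_1;
	if (p == CTX_PRIO_VERY_HIGH)
		return RING_PRIO_2;
	return RING_PRIO_0;
}

int main(void)
{
	const int32_t in[] = { CTX_PRIO_LOW, CTX_PRIO_NORMAL, CTX_PRIO_VERY_HIGH, 42 };

	for (unsigned int i = 0; i < sizeof(in) / sizeof(in[0]); i++) {
		/* Invalid input (e.g. 42) degrades to NORMAL, as the ioctl does. */
		int32_t p = ctx_prio_is_valid(in[i]) ? in[i] : CTX_PRIO_NORMAL;

		printf("%5d -> sched %d, ring %d\n", (int)in[i], to_sched(p), to_ring(p));
	}
	return 0;
}

The fallback mirrors the backwards-compatibility rule in amdgpu_ctx_ioctl() just above: garbage in the priority field degrades to NORMAL rather than failing the ioctl.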
AMDGPU_CTX_OP_ALLOC_CTX: @@ -515,9 +590,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, } static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, - struct amdgpu_ctx_entity *aentity, - int hw_ip, - enum drm_sched_priority priority) + struct amdgpu_ctx_entity *aentity, + int hw_ip, + int32_t priority) { struct amdgpu_device *adev = ctx->adev; unsigned int hw_prio; @@ -525,12 +600,12 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, unsigned num_scheds; /* set sw priority */ - drm_sched_entity_set_priority(&aentity->entity, priority); + drm_sched_entity_set_priority(&aentity->entity, + amdgpu_ctx_to_drm_sched_prio(priority)); /* set hw priority */ if (hw_ip == AMDGPU_HW_IP_COMPUTE) { - hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, - AMDGPU_HW_IP_COMPUTE); + hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip); hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX); scheds = adev->gpu_sched[hw_ip][hw_prio].sched; num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; @@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, } void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority) + int32_t priority) { - enum drm_sched_priority ctx_prio; + int32_t ctx_prio; unsigned i, j; ctx->override_priority = priority; - ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 14db16bc3322..a44b8b8ed39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -47,8 +47,8 @@ struct amdgpu_ctx { spinlock_t ring_lock; struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; bool preamble_presented; - enum drm_sched_priority init_priority; - enum drm_sched_priority override_priority; + int32_t init_priority; + int32_t override_priority; struct mutex lock; atomic_t guilty; unsigned long ras_counter_ce; @@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq); -void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority); +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio); +void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 463b9c0283f7..074ffcf0dac2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -36,6 +36,7 @@ #include "amdgpu_rap.h" #include "amdgpu_securedisplay.h" #include "amdgpu_fw_attestation.h" +#include "amdgpu_umr.h" int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev) { @@ -279,6 +280,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); } +static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd; + + rd = kzalloc(sizeof *rd, GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = 
file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd = file->private_data; + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t value; + + if (size & 0x3 || offset & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + mutex_lock(&rd->lock); + + if (rd->id.use_grbm) { + if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || + (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + mutex_unlock(&rd->lock); + return -EINVAL; + } + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, + rd->id.grbm.sh, + rd->id.grbm.instance); + } + + if (rd->id.use_srbm) { + mutex_lock(&adev->srbm_mutex); + amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, + rd->id.srbm.queue, rd->id.srbm.vmid); + } + + if (rd->id.pg_lock) + mutex_lock(&adev->pm.mutex); + + while (size) { + if (!write_en) { + value = RREG32(offset >> 2); + r = put_user(value, (uint32_t *)buf); + } else { + r = get_user(value, (uint32_t *)buf); + if (!r) + amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value); + } + if (r) { + result = r; + goto end; + } + offset += 4; + size -= 4; + result += 4; + buf += 4; + } +end: + if (rd->id.use_grbm) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + + if (rd->id.use_srbm) { + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } + + if (rd->id.pg_lock) + mutex_unlock(&adev->pm.mutex); + + mutex_unlock(&rd->lock); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); + return result; +} + +static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + int r; + + switch (cmd) { + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: + mutex_lock(&rd->lock); + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id); + mutex_unlock(&rd->lock); + return r ? 
-EINVAL : 0; + default: + return -EINVAL; + } + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0); +} + +static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); +} + /** * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register @@ -1091,6 +1231,16 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, return result; } +static const struct file_operations amdgpu_debugfs_regs2_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl, + .read = amdgpu_debugfs_regs2_read, + .write = amdgpu_debugfs_regs2_write, + .open = amdgpu_debugfs_regs2_open, + .release = amdgpu_debugfs_regs2_release, + .llseek = default_llseek +}; + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -1148,6 +1298,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = { static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, + &amdgpu_debugfs_regs2_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, @@ -1160,6 +1311,7 @@ static const struct file_operations *debugfs_regs[] = { static const char *debugfs_regs_names[] = { "amdgpu_regs", + "amdgpu_regs2", "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", @@ -1206,7 +1358,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) } /* Avoid accidently unparking the sched thread during GPU reset */ - r = down_read_killable(&adev->reset_sem); + r = down_write_killable(&adev->reset_sem); if (r) return r; @@ -1235,7 +1387,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) kthread_unpark(ring->sched.thread); } - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1582,9 +1734,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (!ring) continue; - if (amdgpu_debugfs_ring_init(adev, ring)) { - DRM_ERROR("Failed to register debugfs file for rings !\n"); - } + amdgpu_debugfs_ring_init(adev, ring); } amdgpu_ras_debugfs_create_all(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index 141a8474e24f..6d4965b2d01e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -22,7 +22,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
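The new amdgpu_regs2 node splits bank selection (an ioctl) from the register access itself (a plain read or write at the register's byte offset, which must be 4-byte aligned). A hypothetical userspace sketch follows; the iocdata layout and the ioctl request code below are placeholders standing in for the real definitions in amdgpu_umr.h, which this diff does not include.

/* Hypothetical userspace sketch: select a GRBM bank, then read one register. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct regs2_iocdata_sketch {	/* placeholder for amdgpu_debugfs_regs2_iocdata */
	uint32_t use_grbm, use_srbm, pg_lock;
	struct { uint32_t se, sh, instance; } grbm;
	struct { uint32_t me, pipe, queue, vmid; } srbm;
};
#define REGS2_IOC_SET_STATE_SKETCH _IOWR(0x20, 0, struct regs2_iocdata_sketch)

int main(void)
{
	struct regs2_iocdata_sketch id = { .use_grbm = 1,
		.grbm = { .se = 0, .sh = 0, .instance = 0xFFFFFFFF } };
	uint32_t val;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);

	if (fd < 0)
		return 1;
	/* The state is latched under rd->lock and applied around every access. */
	if (ioctl(fd, REGS2_IOC_SET_STATE_SKETCH, &id) == 0 &&
	    pread(fd, &val, sizeof(val), 0x1234 /* byte offset, 4-aligned */) == sizeof(val))
		printf("reg = 0x%08x\n", val);
	close(fd);
	return 0;
}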
* */ - /* * Debugfs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ab3794c42d36..48089dc0180b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2745,6 +2745,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_release_full_gpu(adev, false)) + DRM_ERROR("failed to release exclusive mode on fini\n"); + } + return 0; } @@ -2805,10 +2810,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_ras_fini(adev); - if (amdgpu_sriov_vf(adev)) - if (amdgpu_virt_release_full_gpu(adev, false)) - DRM_ERROR("failed to release exclusive mode on fini\n"); - return 0; } @@ -3538,17 +3539,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* enable PCIE atomic ops */ - r = pci_enable_atomic_ops_to_root(adev->pdev, - PCI_EXP_DEVCAP2_ATOMIC_COMP32 | - PCI_EXP_DEVCAP2_ATOMIC_COMP64); - if (r) { - adev->have_atomics_support = false; - DRM_INFO("PCIE atomic ops is not supported\n"); - } else { - adev->have_atomics_support = true; - } - amdgpu_device_get_pcie_info(adev); if (amdgpu_mcbp) @@ -3571,6 +3561,19 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + /* enable PCIE atomic ops */ + if (amdgpu_sriov_vf(adev)) + adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) + adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags == + (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + else + adev->have_atomics_support = + !pci_enable_atomic_ops_to_root(adev->pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64); + if (!adev->have_atomics_support) + dev_info(adev->dev, "PCIE atomic ops is not supported\n"); + /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f18240f87387..15b27bcf5273 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1508,6 +1508,10 @@ static int amdgpu_pmops_resume(struct device *dev) struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; + /* Avoids registers access if device is physically gone */ + if (!pci_device_is_present(adev->pdev)) + adev->no_hw_access = true; + r = amdgpu_device_resume(drm_dev, true); if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 76efd5f8950f..d7e4f4660acf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -34,6 +34,7 @@ #include <asm/set_memory.h> #endif #include "amdgpu.h" +#include <drm/drm_drv.h> /* * GART @@ -230,12 +231,16 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, u64 page_base; /* Starting from VEGA10, system bit must be 0 to mean invalid. 
*/ uint64_t flags = 0; + int idx; if (!adev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); return -EINVAL; } + if (!drm_dev_enter(&adev->ddev, &idx)) + return 0; + t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; for (i = 0; i < pages; i++, p++) { @@ -254,6 +259,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); + drm_dev_exit(idx); return 0; } @@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, { uint64_t page_base; unsigned i, j, t; + int idx; if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } + if (!drm_dev_enter(&adev->ddev, &idx)) + return 0; + t = offset / AMDGPU_GPU_PAGE_SIZE; for (i = 0; i < pages; i++) { @@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, page_base += AMDGPU_GPU_PAGE_SIZE; } } + drm_dev_exit(idx); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d6aa032890ee..a573424a6e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -60,10 +60,9 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) goto unlock; } - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); - - drm_dev_exit(idx); + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT, 1); + drm_dev_exit(idx); } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index d43fe2ed8116..f851196c83a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -42,10 +42,9 @@ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES -enum gfx_pipe_priority { - AMDGPU_GFX_PIPE_PRIO_NORMAL = 1, - AMDGPU_GFX_PIPE_PRIO_HIGH, - AMDGPU_GFX_PIPE_PRIO_MAX +enum amdgpu_gfx_pipe_priority { + AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1, + AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2 }; /* Argument for PPSMC_MSG_GpuChangeState */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 9ff600a38559..a0dec7f211f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -153,10 +153,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, { void __iomem *ptr = (void *)cpu_pt_addr; uint64_t value; - int idx; - - if (!drm_dev_enter(&adev->ddev, &idx)) - return 0; /* * The following is for PTE only. GART does not have PDEs. 
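The gart hunks above, the gem fault handler, the pci_device_is_present() check in amdgpu_drv.c, and the gmc hunk continuing below all serve one goal: never touch MMIO once the device may be physically gone. The minimal shape of the guard, as a sketch rather than driver code:

/* Sketch of the hot-unplug guard pattern: hardware access happens only
 * while drm_dev_enter() can open a revocable critical section. */
#include <drm/drm_drv.h>

static int example_hw_touch(struct drm_device *ddev)
{
	int idx;

	if (!drm_dev_enter(ddev, &idx))
		return 0;	/* device unplugged: report success, skip MMIO */

	/* register writes, TLB flushes, etc. go here */

	drm_dev_exit(idx);
	return 0;
}

Note the related move in the gmc hunks: the guard is hoisted out of the per-PTE helper amdgpu_gmc_set_pte_pde() into amdgpu_gmc_init_pdb0(), so the section is entered once per table walk instead of once per entry.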
@@ -165,8 +161,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, value |= flags; writeq(value, ptr + (gpu_page_idx * 8)); - drm_dev_exit(idx); - return 0; } @@ -749,6 +743,10 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; u64 vram_end = vram_addr + vram_size; u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); + int idx; + + if (!drm_dev_enter(&adev->ddev, &idx)) + return; flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE; flags |= AMDGPU_PTE_WRITEABLE; @@ -770,6 +768,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED; /* Requires gart_ptb_gpu_pa to be 4K aligned */ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); + drm_dev_exit(idx); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index c076a6b9a5a2..bc1297dcdf97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -300,20 +300,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, */ int amdgpu_ib_pool_init(struct amdgpu_device *adev) { - unsigned size; int r, i; if (adev->ib_pool_ready) return 0; for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { - if (i == AMDGPU_IB_POOL_DIRECT) - size = PAGE_SIZE * 6; - else - size = AMDGPU_IB_POOL_SIZE; - r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i], - size, AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_SIZE, + AMDGPU_GPU_PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 7e45640fbee0..d2955ea4a62b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -341,27 +341,34 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, switch (query_fw->index) { case TA_FW_TYPE_PSP_XGMI: fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.xgmi.feature_version; + fw_info->feature = adev->psp.xgmi_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_RAS: fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.ras.feature_version; + fw_info->feature = adev->psp.ras_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_HDCP: fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.hdcp.feature_version; + fw_info->feature = adev->psp.hdcp_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_DTM: fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.dtm.feature_version; + fw_info->feature = adev->psp.dtm_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_RAP: fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.rap.feature_version; + fw_info->feature = adev->psp.rap_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_SECUREDISPLAY: fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.securedisplay.feature_version; + fw_info->feature = + adev->psp.securedisplay_context.context.bin_desc + .feature_version; break; default: return -EINVAL; @@ -378,8 +385,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->feature = adev->psp.sos.feature_version; break; case AMDGPU_INFO_FW_ASD: - fw_info->ver = adev->psp.asd.fw_version; - fw_info->feature = adev->psp.asd.feature_version; + fw_info->ver = 
adev->psp.asd_context.bin_desc.fw_version; + fw_info->feature = adev->psp.asd_context.bin_desc.feature_version; break; case AMDGPU_INFO_FW_DMCU: fw_info->ver = adev->dm.dmcu_fw_version; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index a2d3dbbf7d25..ce538f4819f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -31,7 +31,7 @@ void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr, unsigned long *error_count) { - uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4); + uint64_t mc_status = RREG64_PCIE(mc_status_addr); if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) @@ -42,7 +42,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr, unsigned long *error_count) { - uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4); + uint64_t mc_status = RREG64_PCIE(mc_status_addr); if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || @@ -56,7 +56,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr) { - WREG64_PCIE(mc_status_addr * 4, 0x0ULL); + WREG64_PCIE(mc_status_addr, 0x0ULL); } void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, @@ -87,8 +87,8 @@ int amdgpu_mca_ras_late_init(struct amdgpu_device *adev, if (!mca_dev->ras_if) return -ENOMEM; mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block; + mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block; mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - mca_dev->ras_if->sub_block_index = 0; } ih_info.head = fs_info.head = *mca_dev->ras_if; r = amdgpu_ras_late_init(adev, mca_dev->ras_if, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index f860f2f0e296..c74bc7177066 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -29,6 +29,7 @@ struct amdgpu_mca_ras_funcs { void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); uint32_t ras_block; + uint32_t ras_sub_block; const char* sysfs_name; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 01a78c786536..32f36992291e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -695,40 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, } /** - * amdgpu_bo_validate - validate an &amdgpu_bo buffer object - * @bo: pointer to the buffer object - * - * Sets placement according to domain; and changes placement and caching - * policy of the buffer object according to the placement. - * This is used for validating shadow bos. It calls ttm_bo_validate() to - * make sure the buffer is resident where it needs to be. - * - * Returns: - * 0 for success or a negative error code on failure. 
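The amdgpu_mca.c hunks above remove a double scaling: callers already pass mc_status_addr in the units RREG64_PCIE() expects, so multiplying by 4 again redirected the access to an unrelated register. A stand-alone toy illustration, where reg_read() stands in for RREG64_PCIE() and the address map is invented:

/* Toy model of the double-scaling bug fixed in amdgpu_mca.c. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_mmio[64];

static uint64_t reg_read(uint64_t addr)	/* expects the final, scaled address */
{
	return fake_mmio[addr % 64];
}

int main(void)
{
	uint64_t mc_status_addr = 0x28;	/* already scaled by the caller */

	fake_mmio[0x28 % 64] = 0xdead;	/* the register we mean to read */
	/* Old code effectively did reg_read(mc_status_addr * 4): wrong slot. */
	printf("fixed: 0x%llx  buggy: 0x%llx\n",
	       (unsigned long long)reg_read(mc_status_addr),
	       (unsigned long long)reg_read(mc_status_addr * 4));
	return 0;
}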
- */ -int amdgpu_bo_validate(struct amdgpu_bo *bo) -{ - struct ttm_operation_ctx ctx = { false, false }; - uint32_t domain; - int r; - - if (bo->tbo.pin_count) - return 0; - - domain = bo->preferred_domains; - -retry: - amdgpu_bo_placement_from_domain(bo, domain); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { - domain = bo->allowed_domains; - goto retry; - } - - return r; -} - -/** * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list * * @vmbo: BO that will be inserted into the shadow list diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 9d6c001c15f8..d2cbabaf2a46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -327,7 +327,6 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); -int amdgpu_bo_validate(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, uint64_t *gtt_mem, uint64_t *cpu_mem); void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 9b41cb8c3de5..55ffc3da89ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -46,6 +46,10 @@ static int psp_sysfs_init(struct amdgpu_device *adev); static void psp_sysfs_fini(struct amdgpu_device *adev); static int psp_load_smu_fw(struct psp_context *psp); +static int psp_ta_unload(struct psp_context *psp, struct ta_context *context); +static int psp_ta_load(struct psp_context *psp, struct ta_context *context); +static int psp_rap_terminate(struct psp_context *psp); +static int psp_securedisplay_terminate(struct psp_context *psp); /* * Due to DF Cstate management centralized to PMFW, the firmware @@ -778,46 +782,29 @@ static int psp_rl_load(struct amdgpu_device *adev) return ret; } -static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t asd_mc, uint32_t size) +static int psp_asd_load(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); - cmd->cmd.cmd_load_ta.app_len = size; - - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; - cmd->cmd.cmd_load_ta.cmd_buf_len = 0; + return psp_ta_load(psp, &psp->asd_context); } -static int psp_asd_load(struct psp_context *psp) +static int psp_asd_initialize(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd; /* If PSP version doesn't match ASD version, asd loading will be failed. * add workaround to bypass it for sriov now. 
* TODO: add version check to make it common */ - if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes) + if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) return 0; - cmd = acquire_psp_cmd_buf(psp); - - psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes); + psp->asd_context.mem_context.shared_mc_addr = 0; + psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; + psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; - psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->asd.size_bytes); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - if (!ret) { - psp->asd_context.asd_initialized = true; - psp->asd_context.session_id = cmd->resp.session_id; - } - - release_psp_cmd_buf(psp); + ret = psp_asd_load(psp); + if (!ret) + psp->asd_context.initialized = true; return ret; } @@ -829,27 +816,39 @@ static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, cmd->cmd.cmd_unload_ta.session_id = session_id; } +static int psp_ta_unload(struct psp_context *psp, struct ta_context *context) +{ + int ret; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + psp_prep_ta_unload_cmd_buf(cmd, context->session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + release_psp_cmd_buf(psp); + + return ret; +} + static int psp_asd_unload(struct psp_context *psp) { + return psp_ta_unload(psp, &psp->asd_context); +} + +static int psp_asd_terminate(struct psp_context *psp) +{ int ret; - struct psp_gfx_cmd_resp *cmd; if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->asd_context.asd_initialized) + if (!psp->asd_context.initialized) return 0; - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); + ret = psp_asd_unload(psp); - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); if (!ret) - psp->asd_context.asd_initialized = false; - - release_psp_cmd_buf(psp); + psp->asd_context.initialized = false; return ret; } @@ -885,23 +884,22 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, uint64_t ta_bin_mc, - uint32_t ta_bin_size, - uint64_t ta_shared_mc, - uint32_t ta_shared_size) + struct ta_context *context) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd_id = context->ta_load_type; cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); - cmd->cmd.cmd_load_ta.app_len = ta_bin_size; + cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); - cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = + lower_32_bits(context->mem_context.shared_mc_addr); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = + upper_32_bits(context->mem_context.shared_mc_addr); + cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; } static int psp_ta_init_shared_buf(struct psp_context *psp, - struct ta_mem_context *mem_ctx, - uint32_t shared_mem_size) + struct ta_mem_context *mem_ctx) { int ret; @@ -909,8 +907,8 @@ static int psp_ta_init_shared_buf(struct psp_context *psp, * Allocate 16k memory aligned to 4k from Frame Buffer (local * physical) for ta to host memory */ - ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + ret = 
amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, &mem_ctx->shared_buf); @@ -926,8 +924,7 @@ static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) static int psp_xgmi_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context, - PSP_XGMI_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); } static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, @@ -956,31 +953,23 @@ static int psp_ta_invoke(struct psp_context *psp, return ret; } -static int psp_xgmi_load(struct psp_context *psp) +static int psp_ta_load(struct psp_context *psp, struct ta_context *context) { int ret; struct psp_gfx_cmd_resp *cmd; - /* - * TODO: bypass the loading in sriov for now - */ - cmd = acquire_psp_cmd_buf(psp); - psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes); + psp_copy_fw(psp, context->bin_desc.start_addr, + context->bin_desc.size_bytes); - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->xgmi.size_bytes, - psp->xgmi_context.context.mem_context.shared_mc_addr, - PSP_XGMI_SHARED_MEM_SIZE); + psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (!ret) { - psp->xgmi_context.context.initialized = true; - psp->xgmi_context.context.session_id = cmd->resp.session_id; + context->session_id = cmd->resp.session_id; } release_psp_cmd_buf(psp); @@ -988,31 +977,14 @@ static int psp_xgmi_load(struct psp_context *psp) return ret; } -static int psp_xgmi_unload(struct psp_context *psp) +static int psp_xgmi_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - struct amdgpu_device *adev = psp->adev; - - /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ - if (adev->asic_type == CHIP_ARCTURUS || - (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu)) - return 0; - - /* - * TODO: bypass the unloading in sriov for now - */ - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); + return psp_ta_load(psp, &psp->xgmi_context.context); +} - return ret; +static int psp_xgmi_unload(struct psp_context *psp) +{ + return psp_ta_unload(psp, &psp->xgmi_context.context); } int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1023,6 +995,12 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) int psp_xgmi_terminate(struct psp_context *psp) { int ret; + struct amdgpu_device *adev = psp->adev; + + /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ + if (adev->asic_type == CHIP_ARCTURUS || + (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu)) + return 0; if (!psp->xgmi_context.context.initialized) return 0; @@ -1045,13 +1023,16 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo int ret; if (!psp->ta_fw || - !psp->xgmi.size_bytes || - !psp->xgmi.start_addr) + !psp->xgmi_context.context.bin_desc.size_bytes || + !psp->xgmi_context.context.bin_desc.start_addr) return -ENOENT; if (!load_ta) goto invoke; + psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; + psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->xgmi_context.context.initialized) { ret = 
psp_xgmi_init_shared_buf(psp); if (ret) @@ -1060,7 +1041,9 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo /* Load XGMI TA */ ret = psp_xgmi_load(psp); - if (ret) + if (!ret) + psp->xgmi_context.context.initialized = true; + else return ret; invoke: @@ -1118,7 +1101,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) { return psp->adev->asic_type == CHIP_ALDEBARAN && - psp->xgmi.feature_version >= 0x2000000b; + psp->xgmi_context.context.bin_desc.feature_version >= 0x2000000b; } /* @@ -1282,80 +1265,17 @@ int psp_xgmi_set_topology_info(struct psp_context *psp, // ras begin static int psp_ras_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context, - PSP_RAS_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); } static int psp_ras_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - struct ta_ras_shared_memory *ras_cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes); - - ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; - - if (psp->adev->gmc.xgmi.connected_to_cpu) - ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; - else - ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->ras.size_bytes, - psp->ras_context.context.mem_context.shared_mc_addr, - PSP_RAS_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - if (!ret) { - psp->ras_context.context.session_id = cmd->resp.session_id; - - if (!ras_cmd->ras_status) - psp->ras_context.context.initialized = true; - else - dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); - } - - release_psp_cmd_buf(psp); - - if (ret || ras_cmd->ras_status) - amdgpu_ras_fini(psp->adev); - - return ret; + return psp_ta_load(psp, &psp->ras_context.context); } static int psp_ras_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->ras_context.context); } int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1391,31 +1311,11 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) dev_warn(psp->adev->dev, "RAS internal register access blocked\n"); - } - return ret; -} - -static int psp_ras_status_to_errno(struct amdgpu_device *adev, - enum ta_ras_status ras_status) -{ - int ret = -EINVAL; - - switch (ras_status) { - case TA_RAS_STATUS__SUCCESS: - ret = 0; - break; - case TA_RAS_STATUS__RESET_NEEDED: - ret = -EAGAIN; - break; - case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE: - dev_warn(adev->dev, "RAS WARN: ras function unavailable\n"); - break; - case TA_RAS_STATUS__ERROR_ASD_READ_WRITE: - dev_warn(adev->dev, "RAS WARN: asd read or write failed\n"); - break; - default: - dev_err(adev->dev, "RAS ERROR: ras function failed ret 
0x%X\n", ret); + if (ras_cmd->ras_status == TA_RAS_STATUS__ERROR_UNSUPPORTED_IP) + dev_warn(psp->adev->dev, "RAS WARNING: cmd failed due to unsupported ip\n"); + else if (ras_cmd->ras_status) + dev_warn(psp->adev->dev, "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); } return ret; @@ -1444,7 +1344,7 @@ int psp_ras_enable_features(struct psp_context *psp, if (ret) return -EINVAL; - return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status); + return 0; } static int psp_ras_terminate(struct psp_context *psp) @@ -1477,6 +1377,7 @@ static int psp_ras_initialize(struct psp_context *psp) int ret; uint32_t boot_cfg = 0xFF; struct amdgpu_device *adev = psp->adev; + struct ta_ras_shared_memory *ras_cmd; /* * TODO: bypass the initialize in sriov for now @@ -1484,8 +1385,8 @@ static int psp_ras_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(adev)) return 0; - if (!adev->psp.ras.size_bytes || - !adev->psp.ras.start_addr) { + if (!adev->psp.ras_context.context.bin_desc.size_bytes || + !adev->psp.ras_context.context.bin_desc.start_addr) { dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); return 0; } @@ -1531,17 +1432,34 @@ static int psp_ras_initialize(struct psp_context *psp) } } + psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; + psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->ras_context.context.initialized) { ret = psp_ras_init_shared_buf(psp); if (ret) return ret; } + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; + memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); + + if (psp->adev->gmc.xgmi.connected_to_cpu) + ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; + else + ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; + ret = psp_ras_load(psp); - if (ret) - return ret; - return 0; + if (!ret && !ras_cmd->ras_status) + psp->ras_context.context.initialized = true; + else { + if (ras_cmd->ras_status) + dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); + amdgpu_ras_fini(psp->adev); + } + + return ret; } int psp_ras_trigger_error(struct psp_context *psp, @@ -1568,51 +1486,24 @@ int psp_ras_trigger_error(struct psp_context *psp, if (amdgpu_ras_intr_triggered()) return 0; - return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status); + if (ras_cmd->ras_status) + return -EINVAL; + + return 0; } // ras end // HDCP start static int psp_hdcp_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context, - PSP_HDCP_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); } static int psp_hdcp_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - psp_copy_fw(psp, psp->hdcp.start_addr, - psp->hdcp.size_bytes); - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->hdcp.size_bytes, - psp->hdcp_context.context.mem_context.shared_mc_addr, - PSP_HDCP_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->hdcp_context.context.initialized = true; - psp->hdcp_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->hdcp_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->hdcp_context.context); } + static int psp_hdcp_initialize(struct psp_context *psp) { int ret; 
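By this point the pattern of the TA refactor is visible: every trust application (ASD, XGMI, RAS, HDCP, DTM, RAP, securedisplay) now shares psp_ta_load()/psp_ta_unload() driven by a struct ta_context, with an initialize wrapper that marks the context initialized only on successful load, and a terminate wrapper that unloads only if initialized. A self-contained model of that lifecycle (types and values are illustrative; only the shape mirrors the driver):

/* Self-contained model of the ta_context lifecycle after this refactor. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ta_context_model {
	bool initialized;
	uint32_t session_id;
	uint32_t shared_mem_size;
};

static int ta_load(struct ta_context_model *ctx)
{
	ctx->session_id = 7;	/* stand-in for cmd->resp.session_id */
	return 0;
}

static int ta_unload(struct ta_context_model *ctx)
{
	ctx->session_id = 0;
	return 0;
}

/* initialize = set up shared memory, load, mark initialized on success */
static int ta_initialize(struct ta_context_model *ctx, uint32_t shared_size)
{
	int ret;

	ctx->shared_mem_size = shared_size;
	ret = ta_load(ctx);
	if (!ret)
		ctx->initialized = true;
	return ret;
}

/* terminate = unload only if initialized, then clear the flag */
static int ta_terminate(struct ta_context_model *ctx)
{
	int ret;

	if (!ctx->initialized)
		return 0;
	ret = ta_unload(ctx);
	if (!ret)
		ctx->initialized = false;
	return ret;
}

int main(void)
{
	struct ta_context_model asd = { 0 };

	ta_initialize(&asd, 0x4000);
	printf("initialized=%d session=%u\n", asd.initialized, asd.session_id);
	ta_terminate(&asd);
	printf("initialized=%d\n", asd.initialized);
	return 0;
}

The initialize/terminate split also lets per-ASIC quirks live in one place: the Arcturus/Aldebaran XGMI-unload restriction moves out of the load/unload plumbing into psp_xgmi_terminate().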
@@ -1623,12 +1514,15 @@ static int psp_hdcp_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->hdcp.size_bytes || - !psp->hdcp.start_addr) { + if (!psp->hdcp_context.context.bin_desc.size_bytes || + !psp->hdcp_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); return 0; } + psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; + psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->hdcp_context.context.initialized) { ret = psp_hdcp_init_shared_buf(psp); if (ret) @@ -1636,32 +1530,17 @@ static int psp_hdcp_initialize(struct psp_context *psp) } ret = psp_hdcp_load(psp); - if (ret) - return ret; + if (!ret) { + psp->hdcp_context.context.initialized = true; + mutex_init(&psp->hdcp_context.mutex); + } - return 0; + return ret; } static int psp_hdcp_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->hdcp_context.context); } int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1709,42 +1588,12 @@ out: // DTM start static int psp_dtm_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context, - PSP_DTM_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); } static int psp_dtm_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes); - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->dtm.size_bytes, - psp->dtm_context.context.mem_context.shared_mc_addr, - PSP_DTM_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->dtm_context.context.initialized = true; - psp->dtm_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->dtm_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->dtm_context.context); } static int psp_dtm_initialize(struct psp_context *psp) @@ -1757,12 +1606,15 @@ static int psp_dtm_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->dtm.size_bytes || - !psp->dtm.start_addr) { + if (!psp->dtm_context.context.bin_desc.size_bytes || + !psp->dtm_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); return 0; } + psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; + psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->dtm_context.context.initialized) { ret = psp_dtm_init_shared_buf(psp); if (ret) @@ -1770,32 +1622,17 @@ static int psp_dtm_initialize(struct psp_context *psp) } ret = psp_dtm_load(psp); - if (ret) - return ret; + if (!ret) { + psp->dtm_context.context.initialized = true; + mutex_init(&psp->dtm_context.mutex); + } - return 0; + return ret; } static int psp_dtm_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* 
- * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->dtm_context.context); } int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1843,50 +1680,17 @@ out: // RAP start static int psp_rap_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context, - PSP_RAP_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); } static int psp_rap_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes); - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->rap.size_bytes, - psp->rap_context.context.mem_context.shared_mc_addr, - PSP_RAP_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->rap_context.context.initialized = true; - psp->rap_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->rap_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->rap_context.context); } static int psp_rap_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->rap_context.context); } static int psp_rap_initialize(struct psp_context *psp) @@ -1900,12 +1704,15 @@ static int psp_rap_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->rap.size_bytes || - !psp->rap.start_addr) { + if (!psp->rap_context.context.bin_desc.size_bytes || + !psp->rap_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); return 0; } + psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; + psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->rap_context.context.initialized) { ret = psp_rap_init_shared_buf(psp); if (ret) @@ -1913,16 +1720,15 @@ static int psp_rap_initialize(struct psp_context *psp) } ret = psp_rap_load(psp); - if (ret) + if (!ret) { + psp->rap_context.context.initialized = true; + mutex_init(&psp->rap_context.mutex); + } else return ret; ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); if (ret || status != TA_RAP_STATUS__SUCCESS) { - psp_rap_unload(psp); - - psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); - - psp->rap_context.context.initialized = false; + psp_rap_terminate(psp); dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", ret, status); @@ -1989,49 +1795,17 @@ out_unlock: static int psp_securedisplay_init_shared_buf(struct psp_context *psp) { return psp_ta_init_shared_buf( - psp, &psp->securedisplay_context.context.mem_context, - PSP_SECUREDISPLAY_SHARED_MEM_SIZE); + psp, &psp->securedisplay_context.context.mem_context); } static int psp_securedisplay_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, 
psp->securedisplay.start_addr, psp->securedisplay.size_bytes); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->securedisplay.size_bytes, - psp->securedisplay_context.context.mem_context.shared_mc_addr, - PSP_SECUREDISPLAY_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->securedisplay_context.context.initialized = true; - psp->securedisplay_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->securedisplay_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->securedisplay_context.context); } static int psp_securedisplay_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->securedisplay_context.context); } static int psp_securedisplay_initialize(struct psp_context *psp) @@ -2045,12 +1819,16 @@ static int psp_securedisplay_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->securedisplay.size_bytes || - !psp->securedisplay.start_addr) { + if (!psp->securedisplay_context.context.bin_desc.size_bytes || + !psp->securedisplay_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); return 0; } + psp->securedisplay_context.context.mem_context.shared_mem_size = + PSP_SECUREDISPLAY_SHARED_MEM_SIZE; + psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->securedisplay_context.context.initialized) { ret = psp_securedisplay_init_shared_buf(psp); if (ret) @@ -2058,7 +1836,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp) } ret = psp_securedisplay_load(psp); - if (ret) + if (!ret) { + psp->securedisplay_context.context.initialized = true; + mutex_init(&psp->securedisplay_context.mutex); + } else return ret; psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, @@ -2066,12 +1847,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp) ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); if (ret) { - psp_securedisplay_unload(psp); - - psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); - - psp->securedisplay_context.context.initialized = false; - + psp_securedisplay_terminate(psp); dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); return -EINVAL; } @@ -2629,7 +2405,7 @@ skip_memalloc: if (ret) goto failed; - ret = psp_asd_load(psp); + ret = psp_asd_initialize(psp); if (ret) { DRM_ERROR("PSP load asd failed!\n"); return ret; @@ -2721,7 +2497,7 @@ static int psp_hw_fini(void *handle) psp_hdcp_terminate(psp); } - psp_asd_unload(psp); + psp_asd_terminate(psp); psp_tmr_terminate(psp); psp_ring_destroy(psp, PSP_RING_TYPE__KM); @@ -2779,9 +2555,9 @@ static int psp_suspend(void *handle) } } - ret = psp_asd_unload(psp); + ret = psp_asd_terminate(psp); if (ret) { - DRM_ERROR("Failed to unload asd\n"); + DRM_ERROR("Failed to terminate asd\n"); return ret; } @@ -2826,7 +2602,7 @@ static int psp_resume(void *handle) if (ret) goto failed; - ret = psp_asd_load(psp); + ret = psp_asd_initialize(psp); if (ret) { DRM_ERROR("PSP load asd failed!\n"); goto failed; @@ -2994,10 +2770,10 @@ int psp_init_asd_microcode(struct psp_context *psp, goto out; asd_hdr = (const struct 
psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); - adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); - adev->psp.asd.start_addr = (uint8_t *)asd_hdr + + adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); + adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); + adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); + adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); return 0; out: @@ -3284,40 +3060,43 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, switch (desc->fw_type) { case TA_FW_TYPE_PSP_ASD: - psp->asd.fw_version = le32_to_cpu(desc->fw_version); - psp->asd.feature_version = le32_to_cpu(desc->fw_version); - psp->asd.size_bytes = le32_to_cpu(desc->size_bytes); - psp->asd.start_addr = ucode_start_addr; + psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->asd_context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_XGMI: - psp->xgmi.feature_version = le32_to_cpu(desc->fw_version); - psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes); - psp->xgmi.start_addr = ucode_start_addr; + psp->xgmi_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_RAS: - psp->ras.feature_version = le32_to_cpu(desc->fw_version); - psp->ras.size_bytes = le32_to_cpu(desc->size_bytes); - psp->ras.start_addr = ucode_start_addr; + psp->ras_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_HDCP: - psp->hdcp.feature_version = le32_to_cpu(desc->fw_version); - psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes); - psp->hdcp.start_addr = ucode_start_addr; + psp->hdcp_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_DTM: - psp->dtm.feature_version = le32_to_cpu(desc->fw_version); - psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes); - psp->dtm.start_addr = ucode_start_addr; + psp->dtm_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_RAP: - psp->rap.feature_version = le32_to_cpu(desc->fw_version); - psp->rap.size_bytes = le32_to_cpu(desc->size_bytes); - psp->rap.start_addr = ucode_start_addr; + psp->rap_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_SECUREDISPLAY: - psp->securedisplay.feature_version 
= le32_to_cpu(desc->fw_version); - psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes); - psp->securedisplay.start_addr = ucode_start_addr; + psp->securedisplay_context.context.bin_desc.feature_version = + le32_to_cpu(desc->fw_version); + psp->securedisplay_context.context.bin_desc.size_bytes = + le32_to_cpu(desc->size_bytes); + psp->securedisplay_context.context.bin_desc.start_addr = + ucode_start_addr; break; default: dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 8ef2d28af92a..f29afabbff1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -34,17 +34,20 @@ #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 -#define PSP_XGMI_SHARED_MEM_SIZE 0x4000 -#define PSP_RAS_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 #define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000) -#define PSP_HDCP_SHARED_MEM_SIZE 0x4000 -#define PSP_DTM_SHARED_MEM_SIZE 0x4000 -#define PSP_RAP_SHARED_MEM_SIZE 0x4000 -#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000 -#define PSP_SHARED_MEM_SIZE 0x4000 #define PSP_FW_NAME_LEN 0x24 +enum psp_shared_mem_size { + PSP_ASD_SHARED_MEM_SIZE = 0x0, + PSP_XGMI_SHARED_MEM_SIZE = 0x4000, + PSP_RAS_SHARED_MEM_SIZE = 0x4000, + PSP_HDCP_SHARED_MEM_SIZE = 0x4000, + PSP_DTM_SHARED_MEM_SIZE = 0x4000, + PSP_RAP_SHARED_MEM_SIZE = 0x4000, + PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000, +}; + struct psp_context; struct psp_xgmi_node_info; struct psp_xgmi_topology_info; @@ -131,21 +134,26 @@ struct psp_xgmi_topology_info { struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; }; -struct psp_asd_context { - bool asd_initialized; - uint32_t session_id; +struct psp_bin_desc { + uint32_t fw_version; + uint32_t feature_version; + uint32_t size_bytes; + uint8_t *start_addr; }; struct ta_mem_context { struct amdgpu_bo *shared_bo; uint64_t shared_mc_addr; void *shared_buf; + enum psp_shared_mem_size shared_mem_size; }; struct ta_context { bool initialized; uint32_t session_id; struct ta_mem_context mem_context; + struct psp_bin_desc bin_desc; + enum psp_gfx_cmd_id ta_load_type; }; struct ta_cp_context { @@ -263,13 +271,6 @@ struct psp_runtime_boot_cfg_entry { uint32_t reserved; }; -struct psp_bin_desc { - uint32_t fw_version; - uint32_t feature_version; - uint32_t size_bytes; - uint8_t *start_addr; -}; - struct psp_context { struct amdgpu_device *adev; @@ -301,7 +302,6 @@ struct psp_context /* asd firmware */ const struct firmware *asd_fw; - struct psp_bin_desc asd; /* toc firmware */ const struct firmware *toc_fw; @@ -326,14 +326,8 @@ struct psp_context /* xgmi ta firmware and buffer */ const struct firmware *ta_fw; uint32_t ta_fw_version; - struct psp_bin_desc xgmi; - struct psp_bin_desc ras; - struct psp_bin_desc hdcp; - struct psp_bin_desc dtm; - struct psp_bin_desc rap; - struct psp_bin_desc securedisplay; - - struct psp_asd_context asd_context; + + struct ta_context asd_context; struct psp_xgmi_context xgmi_context; struct psp_ras_context ras_context; struct ta_cp_context hdcp_context; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 96a8fd0ca1df..e1c34eef76b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -61,8 +61,30 @@ const char *ras_block_string[] = { "mp0", "mp1", "fuse", + "mca", }; +const char *ras_mca_block_string[] = { + "mca_mp0", + 
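+	/* note: keep this table in enum amdgpu_ras_mca_block order;
+	 * get_ras_block_str() indexes it directly by sub_block_index */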
"mca_mp1", + "mca_mpio", + "mca_iohc", +}; + +const char *get_ras_block_str(struct ras_common_if *ras_block) +{ + if (!ras_block) + return "NULL"; + + if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) + return "OUT OF RANGE"; + + if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) + return ras_mca_block_string[ras_block->sub_block_index]; + + return ras_block_string[ras_block->block]; +} + #define ras_err_str(i) (ras_error_string[ffs(i)]) #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) @@ -187,7 +209,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { *block_id = i; - if (strcmp(name, ras_block_str(i)) == 0) + if (strcmp(name, ras_block_string[i]) == 0) return 0; } return -EINVAL; @@ -509,7 +531,6 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - if (obj->adev->asic_type == CHIP_ALDEBARAN) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) DRM_WARN("Failed to reset error counter and error status"); @@ -529,7 +550,7 @@ static inline void put_obj(struct ras_manager *obj) if (obj && (--obj->use == 0)) list_del(&obj->node); if (obj && (obj->use < 0)) - DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block)); + DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head)); } /* make one obj and return it. */ @@ -545,7 +566,14 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; + /* already exist. return obj? */ if (alive_obj(obj)) return NULL; @@ -573,19 +601,21 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; - if (alive_obj(obj)) { - WARN_ON(head->block != obj->head.block); + if (alive_obj(obj)) return obj; - } } else { - for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { obj = &con->objs[i]; - if (alive_obj(obj)) { - WARN_ON(i != obj->head.block); + if (alive_obj(obj)) return obj; - } } } @@ -626,8 +656,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_feature_allowed(adev, head)) return 0; - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) - return 0; if (enable) { if (!obj) { @@ -678,18 +706,13 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, /* Do not enable if it is not allowed. */ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); - /* Are we alerady in that state we are going to set? */ - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) { - ret = 0; - goto out; - } if (!amdgpu_ras_intr_triggered()) { ret = psp_ras_enable_features(&adev->psp, info, enable); if (ret) { dev_err(adev->dev, "ras %s %s failed %d\n", enable ? 
"enable":"disable", - ras_block_str(head->block), + get_ras_block_str(head), ret); goto out; } @@ -731,7 +754,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (!ret) dev_info(adev->dev, "RAS INFO: %s setup object\n", - ras_block_str(head->block)); + get_ras_block_str(head)); } } else { /* setup the object then issue a ras TA disable cmd.*/ @@ -781,17 +804,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, bool bypass) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; int i; - const enum amdgpu_ras_error_type default_ras_type = - AMDGPU_RAS_ERROR__NONE; + const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; - for (i = 0; i < ras_block_count; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { struct ras_common_if head = { .block = i, .type = default_ras_type, .sub_block_index = 0, }; + + if (i == AMDGPU_RAS_BLOCK__MCA) + continue; + + if (bypass) { + /* + * bypass psp. vbios enable ras for us. + * so just create the obj + */ + if (__amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } else { + if (amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } + } + + for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__MCA, + .type = default_ras_type, + .sub_block_index = i, + }; + if (bypass) { /* * bypass psp. vbios enable ras for us. @@ -809,6 +854,32 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, } /* feature ctl end */ + +void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_err_data *err_data) +{ + switch (ras_block->sub_block_index) { + case AMDGPU_RAS_MCA_BLOCK__MP0: + if (adev->mca.mp0.ras_funcs && + adev->mca.mp0.ras_funcs->query_ras_error_count) + adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MP1: + if (adev->mca.mp1.ras_funcs && + adev->mca.mp1.ras_funcs->query_ras_error_count) + adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MPIO: + if (adev->mca.mpio.ras_funcs && + adev->mca.mpio.ras_funcs->query_ras_error_count) + adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data); + break; + default: + break; + } +} + /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) @@ -872,6 +943,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->hdp.ras_funcs->query_ras_error_count) adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__MCA: + amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data); + break; default: break; } @@ -893,13 +967,13 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->smuio.funcs->get_socket_id(adev), adev->smuio.funcs->get_die_id(adev), obj->err_data.ce_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } else { dev_info(adev->dev, "%ld correctable hardware errors " "detected in %s block, no user " "action is needed.\n", obj->err_data.ce_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } } if (err_data.ue_count) { @@ -912,12 +986,12 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->smuio.funcs->get_socket_id(adev), adev->smuio.funcs->get_die_id(adev), obj->err_data.ue_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } else { 
dev_info(adev->dev, "%ld uncorrectable hardware errors " "detected in %s block\n", obj->err_data.ue_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } } @@ -1027,6 +1101,7 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, case AMDGPU_RAS_BLOCK__SDMA: case AMDGPU_RAS_BLOCK__MMHUB: case AMDGPU_RAS_BLOCK__PCIE_BIF: + case AMDGPU_RAS_BLOCK__MCA: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; case AMDGPU_RAS_BLOCK__XGMI_WAFL: @@ -1034,13 +1109,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, break; default: dev_info(adev->dev, "%s error injection is not supported yet\n", - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); ret = -EINVAL; } if (ret) dev_err(adev->dev, "ras inject %s failed %d\n", - ras_block_str(info->head.block), ret); + get_ras_block_str(&info->head), ret); return ret; } @@ -1383,7 +1458,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) if (amdgpu_ras_is_supported(adev, obj->head.block) && (obj->attr_inuse == 1)) { sprintf(fs_info.debugfs_name, "%s_err_inject", - ras_block_str(obj->head.block)); + get_ras_block_str(&obj->head)); fs_info.head = obj->head; amdgpu_ras_debugfs_create(adev, &fs_info, dir); } @@ -2056,19 +2131,6 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) } /* recovery end */ -/* return 0 if ras will reset gpu and repost.*/ -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block) -{ - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - - if (!ras) - return -EINVAL; - - ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; - return 0; -} - static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { return adev->asic_type == CHIP_VEGA10 || @@ -2181,7 +2243,8 @@ int amdgpu_ras_init(struct amdgpu_device *adev) return 0; con = kmalloc(sizeof(struct amdgpu_ras) + - sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT, + sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + + sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, GFP_KERNEL|__GFP_ZERO); if (!con) return -ENOMEM; @@ -2306,12 +2369,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); if (r) { - if (r == -EAGAIN) { - /* request gpu reset. will run again */ - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - return 0; - } else if (adev->in_suspend || amdgpu_in_reset(adev)) { + if (adev->in_suspend || amdgpu_in_reset(adev)) { /* in resume phase, if fail to enable ras, * clean up all ras fs nodes, and disable ras */ goto cleanup; @@ -2403,19 +2461,6 @@ void amdgpu_ras_resume(struct amdgpu_device *adev) } } } - - if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) { - con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET; - /* setup ras obj state as disabled. - * for init_by_vbios case. - * if we want to enable ras, just enable it in a normal way. - * If we want do disable it, need setup ras obj as enabled, - * then issue another TA disable cmd. 
- * See feature_enable_on_boot - */ - amdgpu_ras_disable_all_features(adev, 1); - amdgpu_ras_reset_gpu(adev); - } } void amdgpu_ras_suspend(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index eae604fd90b8..37b3c40272b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -32,7 +32,6 @@ #include "amdgpu_ras_eeprom.h" #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) -#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1) enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, @@ -49,15 +48,22 @@ enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__MP0, AMDGPU_RAS_BLOCK__MP1, AMDGPU_RAS_BLOCK__FUSE, - AMDGPU_RAS_BLOCK__MPIO, + AMDGPU_RAS_BLOCK__MCA, AMDGPU_RAS_BLOCK__LAST }; -extern const char *ras_block_string[]; +enum amdgpu_ras_mca_block { + AMDGPU_RAS_MCA_BLOCK__MP0 = 0, + AMDGPU_RAS_MCA_BLOCK__MP1, + AMDGPU_RAS_MCA_BLOCK__MPIO, + AMDGPU_RAS_MCA_BLOCK__IOHC, + + AMDGPU_RAS_MCA_BLOCK__LAST +}; -#define ras_block_str(i) (ras_block_string[i]) #define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST +#define AMDGPU_RAS_MCA_BLOCK_COUNT AMDGPU_RAS_MCA_BLOCK__LAST #define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1) enum amdgpu_ras_gfx_subblock { @@ -488,8 +494,6 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, } int amdgpu_ras_recovery_init(struct amdgpu_device *adev); -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block); void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); @@ -544,6 +548,8 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) { return TA_RAS_BLOCK__MP1; case AMDGPU_RAS_BLOCK__FUSE: return TA_RAS_BLOCK__FUSE; + case AMDGPU_RAS_BLOCK__MCA: + return TA_RAS_BLOCK__MCA; default: WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); return TA_RAS_BLOCK__UMC; @@ -638,4 +644,6 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev); int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev); +const char *get_ras_block_str(struct ras_common_if *ras_block); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 0554576d3695..ab2351ba9574 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -415,26 +415,20 @@ static const struct file_operations amdgpu_debugfs_ring_fops = { #endif -int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring) +void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; - struct dentry *ent, *root = minor->debugfs_root; + struct dentry *root = minor->debugfs_root; char name[32]; sprintf(name, "amdgpu_ring_%s", ring->name); + debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, + &amdgpu_debugfs_ring_fops, + ring->ring_size + 12); - ent = debugfs_create_file(name, - S_IFREG | S_IRUGO, root, - ring, &amdgpu_debugfs_ring_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - - i_size_write(ent->d_inode, ring->ring_size + 12); - ring->ent = ent; #endif - return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index e713d31619fe..4d380e79752c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -36,8 +36,13 @@ #define AMDGPU_MAX_VCE_RINGS 3 #define 
AMDGPU_MAX_UVD_ENC_RINGS 2 -#define AMDGPU_RING_PRIO_DEFAULT 1 -#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX +enum amdgpu_ring_priority_level { + AMDGPU_RING_PRIO_0, + AMDGPU_RING_PRIO_1, + AMDGPU_RING_PRIO_DEFAULT = 1, + AMDGPU_RING_PRIO_2, + AMDGPU_RING_PRIO_MAX +}; /* some special values for the owner field */ #define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul) @@ -248,10 +253,6 @@ struct amdgpu_ring { bool has_compute_vm_bug; bool no_scheduler; int hw_prio; - -#if defined(CONFIG_DEBUG_FS) - struct dentry *ent; -#endif }; #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) @@ -351,8 +352,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, int amdgpu_ring_test_helper(struct amdgpu_ring *ring); -int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring); -void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); - +void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index b7d861ed5284..e9b45089a28a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -32,37 +32,9 @@ #include "amdgpu_sched.h" #include "amdgpu_vm.h" -int amdgpu_to_sched_priority(int amdgpu_priority, - enum drm_sched_priority *prio) -{ - switch (amdgpu_priority) { - case AMDGPU_CTX_PRIORITY_VERY_HIGH: - *prio = DRM_SCHED_PRIORITY_HIGH; - break; - case AMDGPU_CTX_PRIORITY_HIGH: - *prio = DRM_SCHED_PRIORITY_HIGH; - break; - case AMDGPU_CTX_PRIORITY_NORMAL: - *prio = DRM_SCHED_PRIORITY_NORMAL; - break; - case AMDGPU_CTX_PRIORITY_LOW: - case AMDGPU_CTX_PRIORITY_VERY_LOW: - *prio = DRM_SCHED_PRIORITY_MIN; - break; - case AMDGPU_CTX_PRIORITY_UNSET: - *prio = DRM_SCHED_PRIORITY_UNSET; - break; - default: - WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return -EINVAL; - } - - return 0; -} - static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, - enum drm_sched_priority priority) + int32_t priority) { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; @@ -89,7 +61,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, int fd, unsigned ctx_id, - enum drm_sched_priority priority) + int32_t priority) { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; @@ -124,7 +96,6 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_sched *args = data; struct amdgpu_device *adev = drm_to_adev(dev); - enum drm_sched_priority priority; int r; /* First check the op, then the op's argument. @@ -138,21 +109,22 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - r = amdgpu_to_sched_priority(args->in.priority, &priority); - if (r) - return r; + if (!amdgpu_ctx_priority_is_valid(args->in.priority)) { + WARN(1, "Invalid context priority %d\n", args->in.priority); + return -EINVAL; + } switch (args->in.op) { case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: r = amdgpu_sched_process_priority_override(adev, args->in.fd, - priority); + args->in.priority); break; case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: r = amdgpu_sched_context_priority_override(adev, args->in.fd, args->in.ctx_id, - priority); + args->in.priority); break; default: /* Impossible. 
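
With amdgpu_to_sched_priority() gone, the AMDGPU_SCHED ioctl above carries the raw AMDGPU_CTX_PRIORITY_* value end to end, and amdgpu_ctx_priority_is_valid() is the only gate in front of the override paths. A minimal userspace sketch of driving the ioctl, assuming the stock uapi from include/uapi/drm/amdgpu_drm.h (the wrapper function name is ours, and the include path may need the libdrm include directory):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Override the scheduler priority of all contexts owned by the DRM file
 * behind victim_fd. Anything above NORMAL requires CAP_SYS_NICE, matching
 * amdgpu_ctx_priority_permit() on the kernel side. */
static int amdgpu_override_process_priority(int drm_fd, int victim_fd,
					    int32_t priority)
{
	union drm_amdgpu_sched args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE;
	args.in.fd = victim_fd;
	args.in.priority = priority;	/* e.g. AMDGPU_CTX_PRIORITY_HIGH */

	/* invalid priority values now fail with -EINVAL up front */
	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
}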
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index abd8469380e5..527d67ded8a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -525,9 +525,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version); FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version); FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version); FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version); -FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version); -FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version); -FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version); +FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version); +FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.feature_version); +FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.feature_version); FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version); FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); @@ -572,6 +572,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL; const struct mes_firmware_header_v1_0 *mes_hdr = NULL; + u8 *ucode_addr; if (NULL == ucode->fw) return 0; @@ -588,94 +589,83 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data; mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || - (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES_DATA && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - - le32_to_cpu(cp_hdr->jt_size) * 4; - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) { - ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes) + - le32_to_cpu(cp_hdr->jt_offset) * 4), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + switch 
(ucode->ucode_id) { + case AMDGPU_UCODE_ID_CP_MEC1: + case AMDGPU_UCODE_ID_CP_MEC2: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + le32_to_cpu(cp_hdr->jt_size) * 4; + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_CP_MEC1_JT: + case AMDGPU_UCODE_ID_CP_MEC2_JT: + ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(cp_hdr->jt_offset) * 4; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_cntl; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_gpm; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_srm; + break; + case AMDGPU_UCODE_ID_RLC_IRAM: + ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes; + ucode_addr = adev->gfx.rlc.rlc_iram_ucode; + break; + case AMDGPU_UCODE_ID_RLC_DRAM: + ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes; + ucode_addr = adev->gfx.rlc.rlc_dram_ucode; + break; + case AMDGPU_UCODE_ID_CP_MES: + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(mes_hdr->mes_ucode_offset_bytes); + break; + case AMDGPU_UCODE_ID_CP_MES_DATA: + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCU_ERAM: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(dmcu_hdr->intv_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) { - ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes) + - le32_to_cpu(dmcu_hdr->intv_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) { - ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); - memcpy(ucode->kaddr, - (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) { - ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) { - 
ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) { - ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); - memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data + - le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA) { - ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); - memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data + - le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)), - ucode->ucode_size); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCU_INTV: + ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(dmcu_hdr->intv_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCUB: + ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + default: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + } + } else { + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); } + memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h new file mode 100644 index 000000000000..919d9d401750 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h @@ -0,0 +1,51 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/ioctl.h> + +/* + * MMIO debugfs IOCTL structure + */ +struct amdgpu_debugfs_regs2_iocdata { + __u32 use_srbm, use_grbm, pg_lock; + struct { + __u32 se, sh, instance; + } grbm; + struct { + __u32 me, pipe, queue, vmid; + } srbm; +}; + +/* + * MMIO debugfs state data (per file* handle) + */ +struct amdgpu_debugfs_regs2_data { + struct amdgpu_device *adev; + struct mutex lock; + struct amdgpu_debugfs_regs2_iocdata id; +}; + +enum AMDGPU_DEBUGFS_REGS2_CMDS { + AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0, +}; + +#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d451c359606a..8a26459bd80b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12); MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); +static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo); + +static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev, + uint32_t size, + struct amdgpu_bo **bo_ptr) +{ + struct ttm_operation_ctx ctx = { true, false }; + struct amdgpu_bo *bo = NULL; + void *addr; + int r; + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &bo, NULL, &addr); + if (r) + return r; + + if (adev->uvd.address_64_bit) + goto succ; + + amdgpu_bo_kunmap(bo); + amdgpu_bo_unpin(bo); + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + amdgpu_uvd_force_into_uvd_segment(bo); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) + goto err; + r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM); + if (r) + goto err_pin; + r = amdgpu_bo_kmap(bo, &addr); + if (r) + goto err_kmap; +succ: + amdgpu_bo_unreserve(bo); + *bo_ptr = bo; + return 0; +err_kmap: + amdgpu_bo_unpin(bo); +err_pin: +err: + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + return r; +} int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { @@ -302,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo); + if (r) + return r; + switch (adev->asic_type) { case CHIP_TONGA: adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; @@ -324,6 +373,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { + void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo); int i, j; drm_sched_entity_destroy(&adev->uvd.entity); @@ -342,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); } + amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); release_firmware(adev->uvd.fw); return 0; @@ -1080,23 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, unsigned offset_idx = 0; unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; - amdgpu_bo_kunmap(bo); - amdgpu_bo_unpin(bo); - - if (!ring->adev->uvd.address_64_bit) { - struct ttm_operation_ctx ctx = { true, false }; - - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); - amdgpu_uvd_force_into_uvd_segment(bo); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) - goto err; - } - r = amdgpu_job_alloc_with_ib(adev, 64, direct ? 
AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job); if (r) - goto err; + return r; if (adev->asic_type >= CHIP_VEGA10) { offset_idx = 1 + ring->me; @@ -1147,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err_free; } + amdgpu_bo_reserve(bo, true); amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); if (fence) *fence = dma_fence_get(f); @@ -1159,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, err_free: amdgpu_job_free(job); - -err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -1173,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = adev->uvd.ib_bo; uint32_t *msg; - int r, i; - - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &bo, NULL, (void **)&msg); - if (r) - return r; + int i; + msg = amdgpu_bo_kptr(bo); /* stitch together an UVD create msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000000); @@ -1199,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, msg[i] = cpu_to_le32(0x0); return amdgpu_uvd_send_msg(ring, bo, true, fence); + } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, @@ -1209,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, uint32_t *msg; int r, i; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &bo, NULL, (void **)&msg); - if (r) - return r; + if (direct) { + bo = adev->uvd.ib_bo; + } else { + r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo); + if (r) + return r; + } + msg = amdgpu_bo_kptr(bo); /* stitch together an UVD destroy msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000002); @@ -1223,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = 4; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_uvd_send_msg(ring, bo, direct, fence); + r = amdgpu_uvd_send_msg(ring, bo, direct, fence); + + if (!direct) + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + + return r; } static void amdgpu_uvd_idle_work_handler(struct work_struct *work) @@ -1298,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *fence; long r; - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + r = amdgpu_uvd_get_create_msg(ring, 1, &fence); if (r) goto error; + r = dma_fence_wait_timeout(fence, false, timeout); + dma_fence_put(fence); + if (r == 0) + r = -ETIMEDOUT; + if (r < 0) + goto error; + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index edbb8194ee81..76ac9699885d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -68,6 +68,7 @@ struct amdgpu_uvd { /* store image width to adjust nb memory state */ unsigned decode_image_width; uint32_t keyselect; + struct amdgpu_bo *ib_bo; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 8e8dee9fac9f..caa4d3420e00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -82,7 +82,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20); static void 
amdgpu_vce_idle_work_handler(struct work_struct *work); static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, struct dma_fence **fence); static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence); @@ -441,12 +440,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * Open up a stream for HW test */ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; + struct amdgpu_ib ib_msg; struct dma_fence *f = NULL; uint64_t addr; int i, r; @@ -456,9 +455,17 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, if (r) return r; - ib = &job->ibs[0]; + memset(&ib_msg, 0, sizeof(ib_msg)); + /* only one gpu page is needed, alloc +1 page to make addr aligned. */ + r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + &ib_msg); + if (r) + goto err; - addr = amdgpu_bo_gpu_offset(bo); + ib = &job->ibs[0]; + /* let addr point to page boundary */ + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr); /* stitch together an VCE create msg */ ib->length_dw = 0; @@ -498,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_job_submit_direct(job, ring, &f); + amdgpu_ib_free(ring->adev, &ib_msg, f); if (r) goto err; @@ -1134,20 +1142,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; long r; /* skip vce ring1/2 ib test for now, since it's not reliable */ if (ring != &ring->adev->vce.ring[0]) return 0; - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -1163,7 +1164,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, NULL); return r; } + +enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring) +{ + switch(ring) { + case 0: + return AMDGPU_RING_PRIO_0; + case 1: + return AMDGPU_RING_PRIO_1; + case 2: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index d6d83a3ec803..be4a6e773c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -71,5 +71,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring); unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring); +enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 008a308a4eca..b60b8fe5bf67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -541,15 +541,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, + struct 
amdgpu_ib *ib_msg, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr; - void *msg = NULL; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; r = amdgpu_job_alloc_with_ib(adev, 64, @@ -558,8 +557,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); - msg = amdgpu_bo_kptr(bo); ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); ib->ptr[1] = addr; ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); @@ -576,9 +573,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -588,27 +583,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + amdgpu_ib_free(adev, ib_msg, f); return r; } static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo **bo) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; uint32_t *msg; int r, i; - *bo = NULL; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000038); msg[2] = cpu_to_le32(0x00000001); @@ -630,19 +624,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand } static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo **bo) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; uint32_t *msg; int r, i; - *bo = NULL; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000018); msg[2] = cpu_to_le32(0x00000000); @@ -658,21 +653,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_send_msg(ring, bo, NULL); + r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_send_msg(ring, bo, &fence); + r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence); if (r) goto error; @@ -688,8 +683,8 @@ error: } static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct dma_fence **fence) + struct amdgpu_ib *ib_msg, + struct dma_fence **fence) { struct amdgpu_vcn_decode_buffer *decode_buffer = NULL; const unsigned int ib_size_dw = 64; @@ -697,7 +692,7 @@ 
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, @@ -706,7 +701,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8; @@ -726,9 +720,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -738,31 +730,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); return r; } int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence); if (r) goto error; @@ -809,7 +799,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { const unsigned ib_size_dw = 16; @@ -825,7 +815,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; @@ -863,7 +853,7 @@ err: } static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { const unsigned ib_size_dw = 16; @@ -879,7 +869,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; @@ -918,21 +908,23 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_ib ib; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_DIRECT, + &ib); if (r) return r; - r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence); + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence); if (r) goto error; @@ -943,9 +935,22 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long 
timeout) r = 0; error: + amdgpu_ib_free(adev, &ib, fence); dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, NULL); return r; } + +enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring) +{ + switch(ring) { + case 0: + return AMDGPU_RING_PRIO_0; + case 1: + return AMDGPU_RING_PRIO_1; + case 2: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index d74c62b49795..795cbaa02ff8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -308,4 +308,6 @@ int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout); +enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index ca058fbcccd4..88c4177b708a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -532,9 +532,12 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, + adev->psp.asd_context.bin_desc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, + adev->psp.ras_context.context.bin_desc.feature_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, + adev->psp.xgmi_context.context.bin_desc.feature_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b15cad78de9..a1ddf74bbdba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -800,7 +800,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo = &vmbo->bo; unsigned entries, ats_entries; uint64_t addr; - int r; + int r, idx; /* Figure out our place in the hierarchy */ if (ancestor->parent) { @@ -845,9 +845,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, return r; } + if (!drm_dev_enter(&adev->ddev, &idx)) + return -ENODEV; + r = vm->update_funcs->map_table(vmbo); if (r) - return r; + goto exit; memset(¶ms, 0, sizeof(params)); params.adev = adev; @@ -856,7 +859,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, r = vm->update_funcs->prepare(¶ms, NULL, AMDGPU_SYNC_EXPLICIT); if (r) - return r; + goto exit; addr = 0; if (ats_entries) { @@ -872,7 +875,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, r = vm->update_funcs->update(¶ms, vmbo, addr, 0, ats_entries, value, flags); if (r) - return r; + goto 
exit; addr += ats_entries * 8; } @@ -895,10 +898,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, r = vm->update_funcs->update(¶ms, vmbo, addr, 0, entries, value, flags); if (r) - return r; + goto exit; } - return vm->update_funcs->commit(¶ms, NULL); + r = vm->update_funcs->commit(¶ms, NULL); +exit: + drm_dev_exit(idx); + return r; } /** @@ -1384,11 +1390,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate) { struct amdgpu_vm_update_params params; - int r; + int r, idx; if (list_empty(&vm->relocated)) return 0; + if (!drm_dev_enter(&adev->ddev, &idx)) + return -ENODEV; + memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; @@ -1396,7 +1405,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, r = vm->update_funcs->prepare(¶ms, NULL, AMDGPU_SYNC_EXPLICIT); if (r) - return r; + goto exit; while (!list_empty(&vm->relocated)) { struct amdgpu_vm_bo_base *entry; @@ -1414,10 +1423,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, r = vm->update_funcs->commit(¶ms, &vm->last_update); if (r) goto error; + drm_dev_exit(idx); return 0; error: amdgpu_vm_invalidate_pds(adev, vm); +exit: + drm_dev_exit(idx); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index a434c71fde8e..995899191288 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -204,8 +204,10 @@ struct amd_sriov_msg_pf2vf_info { } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST]; /* UUID info */ struct amd_sriov_msg_uuid_info uuid_info; + /* pcie atomic Ops info */ + uint32_t pcie_atomic_ops_enabled_flags; /* reserved */ - uint32_t reserved[256 - 47]; + uint32_t reserved[256 - 48]; }; struct amd_sriov_msg_vf2pf_info_header { diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index 058b65730a84..8f7107d392af 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -52,7 +52,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = { .ras_fini = mca_v3_0_mp0_ras_fini, .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MP0, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0, .sysfs_name = "mp0_err_count", }; @@ -79,7 +80,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = { .ras_fini = mca_v3_0_mp1_ras_fini, .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MP1, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1, .sysfs_name = "mp1_err_count", }; @@ -106,7 +108,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = { .ras_fini = mca_v3_0_mpio_ras_fini, .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MPIO, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO, .sysfs_name = "mpio_err_count", }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index f50045cebd44..91b3afa946f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -387,13 +387,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device "errors detected in %s block, " "no user action is needed.\n", obj->err_data.ce_count, - 
ras_block_str(adev->nbio.ras_if->block)); + get_ras_block_str(adev->nbio.ras_if)); if (err_data.ue_count) dev_info(adev->dev, "%ld uncorrectable hardware " "errors detected in %s block\n", obj->err_data.ue_count, - ras_block_str(adev->nbio.ras_if->block)); + get_ras_block_str(adev->nbio.ras_if)); } dev_info(adev->dev, "RAS controller interrupt triggered " @@ -566,7 +566,9 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a return r; } -#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE 0x13b20030 +#define smnRAS_GLOBAL_STATUS_LO_ALDE 0x13b20020 static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) @@ -575,12 +577,20 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, uint32_t corr, fatal, non_fatal; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + if (adev->asic_type == CHIP_ALDEBARAN) + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE); + else + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr); fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrNonFatal); - parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); + + if (adev->asic_type == CHIP_ALDEBARAN) + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE); + else + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); if (corr) err_data->ce_count++; @@ -589,13 +599,21 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, if (corr || fatal || non_fatal) { central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS); + /* clear error status register */ - WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts); + else + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); if (fatal) + { /* clear parity fatal error indication field */ - WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, - parity_sts); + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts); + else + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts); + } if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, BIFL_RasContller_Intr_Recv)) { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 5872d68ed13d..59644015dfc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -84,28 +84,28 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - adev->psp.hdcp.feature_version = + adev->psp.hdcp_context.context.bin_desc.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp.size_bytes = + adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp.start_addr = + adev->psp.hdcp_context.context.bin_desc.start_addr = (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.dtm.feature_version = + adev->psp.dtm_context.context.bin_desc.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm.size_bytes = + adev->psp.dtm_context.context.bin_desc.size_bytes = 
le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm.start_addr = - (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); - adev->psp.securedisplay.feature_version = + adev->psp.securedisplay_context.context.bin_desc.feature_version = le32_to_cpu(ta_hdr->securedisplay.fw_version); - adev->psp.securedisplay.size_bytes = + adev->psp.securedisplay_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->securedisplay.size_bytes); - adev->psp.securedisplay.start_addr = - (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.securedisplay_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->securedisplay.offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 29bf9f09944b..3c02e75fd366 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -151,14 +151,20 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version); - adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes); - adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr + + adev->psp.xgmi_context.context.bin_desc.feature_version = + le32_to_cpu(ta_hdr->xgmi.fw_version); + adev->psp.xgmi_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->xgmi.size_bytes); + adev->psp.xgmi_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version); - adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes); - adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr + + adev->psp.ras_context.context.bin_desc.feature_version = + le32_to_cpu(ta_hdr->ras.fw_version); + adev->psp.ras_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->ras.size_bytes); + adev->psp.ras_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->ras.offset_bytes); } break; @@ -186,16 +192,24 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + adev->psp.hdcp_context.context.bin_desc.feature_version = + le32_to_cpu(ta_hdr->hdcp.fw_version); + adev->psp.hdcp_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->hdcp.size_bytes); + adev->psp.hdcp_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu( + ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.dtm_context.context.bin_desc.feature_version = + 
le32_to_cpu(ta_hdr->dtm.fw_version); + adev->psp.dtm_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->dtm.size_bytes); + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context + .bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); } break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index cc649406234b..281bc4d7f0a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -84,22 +84,22 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - adev->psp.hdcp.feature_version = + adev->psp.hdcp_context.context.bin_desc.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp.size_bytes = + adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp.start_addr = + adev->psp.hdcp_context.context.bin_desc.start_addr = (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.dtm.feature_version = + adev->psp.dtm_context.context.bin_desc.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm.size_bytes = + adev->psp.dtm_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm.start_addr = - (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 779f5c911e11..e4a96e7e386d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -375,10 +375,10 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring, */ static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring) { - uint32_t gcr_cntl = - SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | - SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | - SDMA_GCR_GLI_INV(1); + uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | + SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV | + SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | + SDMA_GCR_GLI_INV(1); /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc97c364fd7..15e295a1412c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -85,6 +85,8 @@ #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 +static const struct amd_ip_funcs soc15_common_ip_funcs; + /* Vega, Raven, Arcturus */ static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] = { @@ -1645,7 +1647,7 @@ static int soc15_common_set_powergating_state(void *handle, return 0; } -const struct amd_ip_funcs soc15_common_ip_funcs = { +static const struct amd_ip_funcs soc15_common_ip_funcs = { .name = "soc15_common", .early_init = soc15_common_early_init, .late_init = soc15_common_late_init, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 034cfdfc4dbe..a025339ac5e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -31,8 +31,6 @@ #define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 #define 
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 -extern const struct amd_ip_funcs soc15_common_ip_funcs; - struct soc15_reg_golden { u32 hwip; u32 instance; diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 0f214a398dd8..de24a0a97d5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -38,9 +38,8 @@ enum ras_command { TA_RAS_COMMAND__TRIGGER_ERROR, }; -enum ta_ras_status -{ - TA_RAS_STATUS__SUCCESS = 0x00, +enum ta_ras_status { + TA_RAS_STATUS__SUCCESS = 0x0000, TA_RAS_STATUS__RESET_NEEDED = 0xA001, TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002, TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003, @@ -55,7 +54,12 @@ enum ta_ras_status TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C, TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D, TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E, - TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F + TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F, + TA_RAS_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010, + TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011, + TA_RAS_STATUS__ERROR_RAS_READ_WRITE = 0xA012, + TA_RAS_STATUS__ERROR_NULL_PTR = 0xA013, + TA_RAS_STATUS__ERROR_UNSUPPORTED_IP = 0xA014 }; enum ta_ras_block { @@ -73,9 +77,18 @@ enum ta_ras_block { TA_RAS_BLOCK__MP0, TA_RAS_BLOCK__MP1, TA_RAS_BLOCK__FUSE, + TA_RAS_BLOCK__MCA, TA_NUM_BLOCK_MAX }; +enum ta_ras_mca_block { + TA_RAS_MCA_BLOCK__MP0 = 0, + TA_RAS_MCA_BLOCK__MP1 = 1, + TA_RAS_MCA_BLOCK__MPIO = 2, + TA_RAS_MCA_BLOCK__IOHC = 3, + TA_MCA_NUM_BLOCK_MAX +}; + enum ta_ras_error_type { TA_RAS_ERROR__NONE = 0, TA_RAS_ERROR__PARITY = 1, @@ -105,17 +118,15 @@ struct ta_ras_trigger_error_input { uint64_t value; // method if error injection. i.e persistent, coherent etc. }; -struct ta_ras_init_flags -{ - uint8_t poison_mode_en; - uint8_t dgpu_mode; +struct ta_ras_init_flags { + uint8_t poison_mode_en; + uint8_t dgpu_mode; }; -struct ta_ras_output_flags -{ - uint8_t ras_init_success_flag; - uint8_t err_inject_switch_disable_flag; - uint8_t reg_access_failure_flag; +struct ta_ras_output_flags { + uint8_t ras_init_success_flag; + uint8_t err_inject_switch_disable_flag; + uint8_t reg_access_failure_flag; }; /* Common input structure for RAS callbacks */ @@ -126,14 +137,13 @@ union ta_ras_cmd_input { struct ta_ras_disable_features_input disable_features; struct ta_ras_trigger_error_input trigger_error; - uint32_t reserve_pad[256]; + uint32_t reserve_pad[256]; }; -union ta_ras_cmd_output -{ - struct ta_ras_output_flags flags; +union ta_ras_cmd_output { + struct ta_ras_output_flags flags; - uint32_t reserve_pad[256]; + uint32_t reserve_pad[256]; }; /* Shared Memory structures */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 7232241e3bfb..0fef925b6602 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -698,6 +698,19 @@ static int uvd_v3_1_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v3_1_stop(adev); + + return 0; +} + +static int uvd_v3_1_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -722,17 +735,6 @@ static int uvd_v3_1_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v3_1_stop(adev); - - return 0; -} - -static int uvd_v3_1_suspend(void *handle) -{ - int r; - 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v3_1_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 52d6de969f46..c108b8381795 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -212,6 +212,19 @@ static int uvd_v4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v4_2_stop(adev); + + return 0; +} + +static int uvd_v4_2_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -236,17 +249,6 @@ static int uvd_v4_2_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v4_2_stop(adev); - - return 0; -} - -static int uvd_v4_2_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v4_2_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index db6d06758e4d..563493d1f830 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -210,6 +210,19 @@ static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v5_0_stop(adev); + + return 0; +} + +static int uvd_v5_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -234,17 +247,6 @@ static int uvd_v5_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v5_0_stop(adev); - - return 0; -} - -static int uvd_v5_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v5_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index bc571833632e..d5d023a24269 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -332,15 +332,9 @@ err: static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -357,9 +351,6 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unpin(bo); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index b6e82d75561f..b483f03b4591 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -338,15 +338,9 @@ err: static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - r = 
uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -363,9 +357,6 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unpin(bo); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -606,6 +597,23 @@ static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (!amdgpu_sriov_vf(adev)) + uvd_v7_0_stop(adev); + else { + /* full access mode, so don't touch any UVD register */ + DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); + } + + return 0; +} + +static int uvd_v7_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -630,21 +638,6 @@ static int uvd_v7_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (!amdgpu_sriov_vf(adev)) - uvd_v7_0_stop(adev); - else { - /* full access mode, so don't touch any UVD register */ - DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); - } - - return 0; -} - -static int uvd_v7_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v7_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index b70c17f0c52e..67eb01fef789 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -431,10 +431,12 @@ static int vce_v2_0_sw_init(void *handle) return r; for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -479,6 +481,17 @@ static int vce_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vce.idle_work); + + return 0; +} + +static int vce_v2_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -502,14 +515,6 @@ static int vce_v2_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - return 0; -} - -static int vce_v2_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vce_v2_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 9de66893ccd6..142e291983b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -440,10 +440,12 @@ static int vce_v3_0_sw_init(void *handle) return r; for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -490,6 +492,21 @@ static int vce_v3_0_hw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vce.idle_work); + + r = vce_v3_0_wait_for_idle(handle); + if (r) + return r; + + vce_v3_0_stop(adev); + return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); +} + +static int 
vce_v3_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -513,19 +530,6 @@ static int vce_v3_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - r = vce_v3_0_wait_for_idle(handle); - if (r) - return r; - - vce_v3_0_stop(adev); - return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); -} - -static int vce_v3_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vce_v3_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index fec902b800c2..226b79254db8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -463,6 +463,8 @@ static int vce_v4_0_sw_init(void *handle) } for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { @@ -478,7 +480,7 @@ static int vce_v4_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -542,29 +544,8 @@ static int vce_v4_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work - * - enable powergating - * - enable clockgating - * - disable dpm - * - * TODO: to align with the VCN implementation, move the - * jobs for clockgating/powergating/dpm setting to - * ->set_powergating_state(). - */ cancel_delayed_work_sync(&adev->vce.idle_work); - if (adev->pm.dpm_enabled) { - amdgpu_dpm_enable_vce(adev, false); - } else { - amdgpu_asic_set_vce_clocks(adev, 0, 0); - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - } - if (!amdgpu_sriov_vf(adev)) { /* vce_v4_0_wait_for_idle(handle); */ vce_v4_0_stop(adev); @@ -594,6 +575,29 @@ static int vce_v4_0_suspend(void *handle) drm_dev_exit(idx); } + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
+ */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + } + r = vce_v4_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 121ee9f2b8d1..6c11739270c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -145,10 +145,12 @@ static int vcn_v1_0_sw_init(void *handle) SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst->ring_enc[i]; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index f4686e918e0d..a03c0fc8338f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -22,6 +22,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vcn.h" @@ -159,6 +160,8 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; if (!amdgpu_sriov_vf(adev)) @@ -167,7 +170,7 @@ static int vcn_v2_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -192,11 +195,14 @@ static int vcn_v2_0_sw_init(void *handle) */ static int vcn_v2_0_sw_fini(void *handle) { - int r; + int r, idx; struct amdgpu_device *adev = (struct amdgpu_device *)handle; volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; - fw_shared->present_flag_0 = 0; + if (drm_dev_enter(&adev->ddev, &idx)) { + fw_shared->present_flag_0 = 0; + drm_dev_exit(idx); + } amdgpu_virt_free_mm_table(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index e0c0c3734432..1780ad1eacd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -22,6 +22,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vcn.h" @@ -194,6 +195,8 @@ static int vcn_v2_5_sw_init(void *handle) return r; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst[j].ring_enc[i]; ring->use_doorbell = true; @@ -203,7 +206,7 @@ static int vcn_v2_5_sw_init(void *handle) sprintf(ring->name, "vcn_enc_%d.%d", j, i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -233,17 +236,21 @@ static int vcn_v2_5_sw_init(void *handle) */ static int vcn_v2_5_sw_fini(void *handle) { - int i, r; + int i, r, idx; struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; volatile struct amdgpu_fw_shared *fw_shared; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; - fw_shared->present_flag_0 = 0; + if (drm_dev_enter(&adev->ddev, &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 = 0; + } + drm_dev_exit(idx); } + if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3d18aab88b4e..b1af70d49c6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -224,6 +224,8 @@ static int vcn_v3_0_sw_init(void *handle) return r; for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j); + /* VCN ENC TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); @@ -239,8 +241,7 @@ static int vcn_v3_0_sw_init(void *handle) } sprintf(ring->name, "vcn_enc_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, - AMDGPU_RING_PRIO_DEFAULT, - &adev->vcn.inst[i].sched_score); + hw_prio, &adev->vcn.inst[i].sched_score); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 98d1b3ab3a46..c2a4d920da40 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -971,7 +971,6 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - svm_migrate_fini((struct amdgpu_device *)kfd->kgd); device_queue_manager_uninit(kfd->dqm); kfd_interrupt_exit(kfd); kfd_topology_remove_device(kfd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index dab290a4d19d..4a16e3c257b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev) pgmap->ops = &svm_migrate_pgmap_ops; pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev); pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; + + /* Device manager releases device-specific resources, memory region and + * pgmap when driver disconnects from device. 
+ */ r = devm_memremap_pages(adev->dev, pgmap); if (IS_ERR(r)) { pr_err("failed to register HMM device memory\n"); + + /* Disable SVM support capability */ + pgmap->type = 0; devm_release_mem_region(adev->dev, res->start, res->end - res->start + 1); return PTR_ERR(r); @@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev) return 0; } - -void svm_migrate_fini(struct amdgpu_device *adev) -{ - struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap; - - devm_memunmap_pages(adev->dev, pgmap); - devm_release_mem_region(adev->dev, pgmap->range.start, - pgmap->range.end - pgmap->range.start + 1); -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 0de76b5d4973..2f5b3394c9ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -47,7 +47,6 @@ unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); int svm_migrate_init(struct amdgpu_device *adev); -void svm_migrate_fini(struct amdgpu_device *adev); #else @@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev) { return 0; } -static inline void svm_migrate_fini(struct amdgpu_device *adev) -{ - /* empty */ -} #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9fc8021bb0ab..9d0f65a90002 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange) mmu_interval_notifier_remove(&prange->notifier); } +static bool +svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr && !dma_mapping_error(dev, dma_addr) && + !(dma_addr & SVM_RANGE_VRAM_DOMAIN); +} + static int svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, unsigned long offset, unsigned long npages, @@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, addr += offset; for (i = 0; i < npages; i++) { - if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]), - "leaking dma mapping\n")) + if (svm_is_valid_dma_mapping_addr(dev, addr[i])) dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); page = hmm_pfn_to_page(hmm_pfns[i]); @@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, return; for (i = offset; i < offset + npages; i++) { - if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) + if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i])) continue; pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); @@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned long last_start; int last_domain; int r = 0; - int64_t i; + int64_t i, j; last_start = prange->start + offset; @@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, for (i = offset; i < offset + npages; i++) { last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; - if ((prange->start + i) < prange->last && + + /* Collect all pages in the same address range and memory domain + * that can be mapped with a single call to update mapping. 
+ */ + if (i < offset + npages - 1 && last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) continue; @@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, NULL, dma_addr, &vm->last_update, &table_freed); + + for (j = last_start - prange->start; j <= i; j++) + dma_addr[j] |= last_domain; + if (r) { pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); goto out; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 66c799f5c7cf..07adac1a8c42 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -215,6 +215,8 @@ static void handle_cursor_update(struct drm_plane *plane, static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); + static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); @@ -618,6 +620,116 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) } #endif +/** + * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * Dmub AUX or SET_CONFIG command completion processing callback. + * Copies dmub notification to DM which is to be read by the AUX command + * issuing thread and also signals the event to wake up the thread. + */ +void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +{ + if (adev->dm.dmub_notify) + memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); + if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) + complete(&adev->dm.dmub_aux_transfer_done); +} + +/** + * dmub_hpd_callback - DMUB HPD interrupt processing callback. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * Dmub Hpd interrupt processing callback. Gets display index through the + * link index and calls helper to do the processing.
+ */ +void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct dc_link *link; + uint8_t link_index = 0; + struct drm_device *dev = adev->dm.ddev; + + if (adev == NULL) + return; + + if (notify == NULL) { + DRM_ERROR("DMUB HPD callback notification was NULL"); + return; + } + + if (notify->link_index > adev->dm.dc->link_count) { + DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index); + return; + } + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + link_index = notify->link_index; + + link = adev->dm.dc->links[link_index]; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + aconnector = to_amdgpu_dm_connector(connector); + if (link && aconnector->dc_link == link) { + DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); + handle_hpd_irq_helper(aconnector); + break; + } + } + drm_connector_list_iter_end(&iter); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + +} + +/** + * register_dmub_notify_callback - Sets callback for DMUB notify + * @adev: amdgpu_device pointer + * @type: Type of dmub notification + * @callback: Dmub interrupt callback function + * @dmub_int_thread_offload: offload indicator + * + * API to register a dmub callback handler for a dmub notification. + * Also sets indicator whether callback processing is to be offloaded + * to dmub interrupt handling thread. + * Return: true if successfully registered, false if there is existing registration + */ +bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, +dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) +{ + if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { + adev->dm.dmub_callback[type] = callback; + adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; + } else + return false; + + return true; +} + +static void dm_handle_hpd_work(struct work_struct *work) +{ + struct dmub_hpd_work *dmub_hpd_wrk; + + dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); + + if (!dmub_hpd_wrk->dmub_notify) { + DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); + return; + } + + if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { + dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, + dmub_hpd_wrk->dmub_notify); + } + kfree(dmub_hpd_wrk); + +} + #define DMUB_TRACE_MAX_READ 64 /** * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt @@ -634,18 +746,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) struct amdgpu_display_manager *dm = &adev->dm; struct dmcub_trace_buf_entry entry = { 0 }; uint32_t count = 0; + struct dmub_hpd_work *dmub_hpd_wrk; if (dc_enable_dmub_notifications(adev->dm.dc)) { + dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); + if (!dmub_hpd_wrk) { + DRM_ERROR("Failed to allocate dmub_hpd_wrk"); + return; + } + INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); + if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { do { dc_stat_get_dmub_notification(adev->dm.dc, &notify); + if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) { + DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type, + ARRAY_SIZE(dm->dmub_thread_offload)); + continue; + } + if (dm->dmub_thread_offload[notify.type] == true) { + 
dmub_hpd_wrk->dmub_notify = &notify; + dmub_hpd_wrk->adev = adev; + queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); + } else { + dm->dmub_callback[notify.type](adev, &notify); + } - if (adev->dm.dmub_notify) - memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification)); - if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) - complete(&adev->dm.dmub_aux_transfer_done); - // TODO : HPD Implementation + } while (notify.pending_notification); } else { DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); @@ -1083,6 +1210,83 @@ static void vblank_control_worker(struct work_struct *work) } #endif + +static void dm_handle_hpd_rx_offload_work(struct work_struct *work) +{ + struct hpd_rx_irq_offload_work *offload_work; + struct amdgpu_dm_connector *aconnector; + struct dc_link *dc_link; + struct amdgpu_device *adev; + enum dc_connection_type new_connection_type = dc_connection_none; + unsigned long flags; + + offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); + aconnector = offload_work->offload_wq->aconnector; + + if (!aconnector) { + DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); + goto skip; + } + + adev = drm_to_adev(aconnector->base.dev); + dc_link = aconnector->dc_link; + + mutex_lock(&aconnector->hpd_lock); + if (!dc_link_detect_sink(dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + mutex_unlock(&aconnector->hpd_lock); + + if (new_connection_type == dc_connection_none) + goto skip; + + if (amdgpu_in_reset(adev)) + goto skip; + + mutex_lock(&adev->dm.dc_lock); + if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) + dc_link_dp_handle_automated_test(dc_link); + else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && + hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && + dc_link_dp_allow_hpd_rx_irq(dc_link)) { + dc_link_dp_handle_link_loss(dc_link); + spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); + offload_work->offload_wq->is_handling_link_loss = false; + spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + } + mutex_unlock(&adev->dm.dc_lock); + +skip: + kfree(offload_work); + +} + +static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) +{ + int max_caps = dc->caps.max_links; + int i = 0; + struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; + + hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); + + if (!hpd_rx_offload_wq) + return NULL; + + + for (i = 0; i < max_caps; i++) { + hpd_rx_offload_wq[i].wq = + create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); + + if (hpd_rx_offload_wq[i].wq == NULL) { + DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); + return NULL; + } + + spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); + } + + return hpd_rx_offload_wq; +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -1201,6 +1405,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); + if (!adev->dm.hpd_rx_offload_wq) { + DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); + goto error; + } + #if defined(CONFIG_DRM_AMD_DC_DCN) if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { struct dc_phy_addr_space_config pa_config; @@ -1253,7 +1463,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); goto
error; } + + adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); + if (!adev->dm.delayed_hpd_wq) { + DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); + goto error; + } + amdgpu_dm_outbox_init(adev); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, + dmub_aux_setconfig_callback, false)) { + DRM_ERROR("amdgpu: fail to register dmub aux callback"); + goto error; + } + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } +#endif } if (amdgpu_dm_initialize_drm_device(adev)) { @@ -1335,6 +1563,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) if (dc_enable_dmub_notifications(adev->dm.dc)) { kfree(adev->dm.dmub_notify); adev->dm.dmub_notify = NULL; + destroy_workqueue(adev->dm.delayed_hpd_wq); + adev->dm.delayed_hpd_wq = NULL; } if (adev->dm.dmub_bo) @@ -1342,6 +1572,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); + if (adev->dm.hpd_rx_offload_wq) { + for (i = 0; i < adev->dm.dc->caps.max_links; i++) { + if (adev->dm.hpd_rx_offload_wq[i].wq) { + destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); + adev->dm.hpd_rx_offload_wq[i].wq = NULL; + } + } + + kfree(adev->dm.hpd_rx_offload_wq); + adev->dm.hpd_rx_offload_wq = NULL; + } + /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); @@ -1978,6 +2220,16 @@ context_alloc_fail: return res; } +static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) +{ + int i; + + if (dm->hpd_rx_offload_wq) { + for (i = 0; i < dm->dc->caps.max_links; i++) + flush_workqueue(dm->hpd_rx_offload_wq[i].wq); + } +} + static int dm_suspend(void *handle) { struct amdgpu_device *adev = handle; @@ -1999,6 +2251,8 @@ static int dm_suspend(void *handle) amdgpu_dm_irq_suspend(adev); + hpd_rx_irq_work_suspend(dm); + return ret; } @@ -2009,6 +2263,8 @@ static int dm_suspend(void *handle) amdgpu_dm_irq_suspend(adev); + hpd_rx_irq_work_suspend(dm); + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); return 0; @@ -2155,7 +2411,7 @@ cleanup: return; } -static void dm_set_dpms_off(struct dc_link *link) +static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state) { struct dc_stream_state *stream_state; struct amdgpu_dm_connector *aconnector = link->priv; @@ -2176,6 +2432,7 @@ static void dm_set_dpms_off(struct dc_link *link) } stream_update.stream = stream_state; + acrtc_state->force_dpms_off = true; dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, stream_state, &stream_update, stream_state->ctx->dc->current_state); @@ -2613,20 +2870,22 @@ void amdgpu_dm_update_connector_after_detect( dc_sink_release(sink); } -static void handle_hpd_irq(void *param) +static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) { - struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); -#ifdef CONFIG_DRM_AMD_DC_HDCP struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); -#endif + struct dm_crtc_state *dm_crtc_state = NULL; if (adev->dm.disable_hpd_irq) return; + if (dm_con_state->base.state && dm_con_state->base.crtc) + dm_crtc_state = 
to_dm_crtc_state(drm_atomic_get_crtc_state( + dm_con_state->base.state, + dm_con_state->base.crtc)); /* * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. @@ -2658,8 +2917,9 @@ static void handle_hpd_irq(void *param) } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { if (new_connection_type == dc_connection_none && - aconnector->dc_link->type == dc_connection_none) - dm_set_dpms_off(aconnector->dc_link); + aconnector->dc_link->type == dc_connection_none && + dm_crtc_state) + dm_set_dpms_off(aconnector->dc_link, dm_crtc_state); amdgpu_dm_update_connector_after_detect(aconnector); @@ -2674,7 +2934,15 @@ static void handle_hpd_irq(void *param) } -static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) +static void handle_hpd_irq(void *param) +{ + struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; + + handle_hpd_irq_helper(aconnector); + +} + +static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) { uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; uint8_t dret; @@ -2752,6 +3020,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); } +static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, + union hpd_irq_data hpd_irq_data) +{ + struct hpd_rx_irq_offload_work *offload_work = + kzalloc(sizeof(*offload_work), GFP_KERNEL); + + if (!offload_work) { + DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); + return; + } + + INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); + offload_work->data = hpd_irq_data; + offload_work->offload_wq = offload_wq; + + queue_work(offload_wq->wq, &offload_work->work); + DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); +} + static void handle_hpd_rx_irq(void *param) { struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; @@ -2763,14 +3050,16 @@ static void handle_hpd_rx_irq(void *param) enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); union hpd_irq_data hpd_irq_data; - bool lock_flag = 0; + bool link_loss = false; + bool has_left_work = false; + int idx = aconnector->base.index; + struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); if (adev->dm.disable_hpd_irq) return; - /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio * conflict, after implement i2c helper, this mutex should be @@ -2778,43 +3067,41 @@ static void handle_hpd_rx_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); - read_hpd_rx_irq_data(dc_link, &hpd_irq_data); + result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, + &link_loss, true, &has_left_work); - if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || - (dc_link->type == dc_connection_mst_branch)) { - if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { - result = true; - dm_handle_hpd_rx_irq(aconnector); - goto out; - } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { - result = false; - dm_handle_hpd_rx_irq(aconnector); + if (!has_left_work) + goto out; + + if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + goto out; + } + + if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { + if 
(hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || + hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + dm_handle_mst_sideband_msg(aconnector); goto out; } - } - /* - * TODO: We need the lock to avoid touching DC state while it's being - * modified during automated compliance testing, or when link loss - * happens. While this should be split into subhandlers and proper - * interfaces to avoid having to conditionally lock like this in the - * outer layer, we need this workaround temporarily to allow MST - * lightup in some scenarios to avoid timeout. - */ - if (!amdgpu_in_reset(adev) && - (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) || - hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) { - mutex_lock(&adev->dm.dc_lock); - lock_flag = 1; - } + if (link_loss) { + bool skip = false; -#ifdef CONFIG_DRM_AMD_DC_HDCP - result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL); -#else - result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL); -#endif - if (!amdgpu_in_reset(adev) && lock_flag) - mutex_unlock(&adev->dm.dc_lock); + spin_lock(&offload_wq->offload_lock); + skip = offload_wq->is_handling_link_loss; + + if (!skip) + offload_wq->is_handling_link_loss = true; + + spin_unlock(&offload_wq->offload_lock); + + if (!skip) + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + + goto out; + } + } out: if (result && !is_mst_root_connector) { @@ -2899,6 +3186,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev) amdgpu_dm_irq_register_interrupt(adev, &int_params, handle_hpd_rx_irq, (void *) aconnector); + + if (adev->dm.hpd_rx_offload_wq) + adev->dm.hpd_rx_offload_wq[connector->index].aconnector = + aconnector; } } } @@ -4670,6 +4961,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs) | AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | @@ -4680,6 +4981,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs)); add_modifier(mods, size, capacity, AMD_FMT_MOD | @@ -4761,10 +5073,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, if (modifier_has_dcc(modifier) && !force_disable_dcc) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; + bool independent_64b_blks = 
AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); + bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); dcc->enable = 1; dcc->meta_pitch = afb->base.pitches[1]; - dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); + dcc->independent_64b_blks = independent_64b_blks; + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { + if (independent_64b_blks && independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else if (independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_128b; + else if (independent_64b_blks && !independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } else { + if (independent_64b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } address->grph.meta_addr.low_part = lower_32_bits(dcc_address); address->grph.meta_addr.high_part = upper_32_bits(dcc_address); @@ -5600,9 +5929,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, { struct drm_connector *drm_connector = &aconnector->base; uint32_t link_bandwidth_kbps; + uint32_t max_dsc_target_bpp_limit_override = 0; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); + + if (stream->link && stream->link->local_sink) + max_dsc_target_bpp_limit_override = + stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; + /* Set DSC policy according to dsc_clock_en */ dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); @@ -5612,7 +5947,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, - 0, + max_dsc_target_bpp_limit_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) { @@ -5963,6 +6298,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; + state->force_dpms_off = cur->force_dpms_off; /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; @@ -8679,7 +9015,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * and rely on sending it from software. */ if (acrtc_attach->base.state->event && - acrtc_state->active_planes > 0) { + acrtc_state->active_planes > 0 && + !acrtc_state->force_dpms_off) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); @@ -10819,6 +11156,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; + struct dc_sink *sink; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); @@ -10830,28 +11168,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, goto update; } - if (!edid) { + sink = amdgpu_dm_connector->dc_sink ? 
+ amdgpu_dm_connector->dc_sink : + amdgpu_dm_connector->dc_em_sink; + + if (!edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; amdgpu_dm_connector->pixel_clock_mhz = 0; + connector->display_info.monitor_range.min_vfreq = 0; + connector->display_info.monitor_range.max_vfreq = 0; + freesync_capable = false; goto update; } dm_con_state = to_dm_connector_state(connector->state); - if (!amdgpu_dm_connector->dc_sink) { - DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); - goto update; - } if (!adev->dm.freesync_module) goto update; - if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT + || sink->sink_signal == SIGNAL_TYPE_EDP) { bool edid_check_required = false; if (edid) { @@ -10898,7 +11239,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, freesync_capable = true; } } - } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { timing = &edid->detailed_timings[i]; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d1d353a7c77d..a85b09986aab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -47,6 +47,8 @@ #define AMDGPU_DM_MAX_CRTC 6 #define AMDGPU_DM_MAX_NUM_EDP 2 + +#define AMDGPU_DMUB_NOTIFICATION_MAX 5 /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -86,6 +88,21 @@ struct dm_compressor_info { uint64_t gpu_addr; }; +typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify); + +/** + * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ + * + * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq + * @dmub_notify: notification for callback function + * @adev: amdgpu_device pointer + */ +struct dmub_hpd_work { + struct work_struct handle_hpd_work; + struct dmub_notification *dmub_notify; + struct amdgpu_device *adev; +}; + /** * struct vblank_control_work - Work data for vblank control * @work: Kernel work data for the work event @@ -155,6 +172,48 @@ struct dal_allocation { }; /** + * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq + * offload work + */ +struct hpd_rx_irq_offload_work_queue { + /** + * @wq: workqueue structure to queue offload work. + */ + struct workqueue_struct *wq; + /** + * @offload_lock: To protect fields of offload work queue. 
+ */ + spinlock_t offload_lock; + /** + * @is_handling_link_loss: Used to prevent inserting link loss event when + * we're handling link loss + */ + bool is_handling_link_loss; + /** + * @aconnector: The aconnector that this work queue is attached to + */ + struct amdgpu_dm_connector *aconnector; +}; + +/** + * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure + */ +struct hpd_rx_irq_offload_work { + /** + * @work: offload work + */ + struct work_struct work; + /** + * @data: reference irq data which is used while handling offload work + */ + union hpd_irq_data data; + /** + * @offload_wq: offload work queue that this work is queued to + */ + struct hpd_rx_irq_offload_work_queue *offload_wq; +}; + +/** * struct amdgpu_display_manager - Central amdgpu display manager device * * @dc: Display Core control structure @@ -190,9 +249,31 @@ struct amdgpu_display_manager { */ struct dmub_srv *dmub_srv; + /** + * @dmub_notify: + * + * Notification from DMUB. + */ + struct dmub_notification *dmub_notify; /** + * @dmub_callback: + * + * Callback functions to handle notification from DMUB. + */ + + dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX]; + + /** + * @dmub_thread_offload: + * + * Flag to indicate if callback is offloaded. + */ + + bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX]; + + /** * @dmub_fb_info: * * Framebuffer regions for the DMUB. @@ -422,7 +503,12 @@ struct amdgpu_display_manager { */ struct crc_rd_work *crc_rd_wrk; #endif - + /** + * @hpd_rx_offload_wq: + * + * Work queue to offload work items of hpd_rx_irq + */ + struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq; /** * @mst_encoders: * @@ -439,6 +525,7 @@ struct amdgpu_display_manager { */ struct list_head da_list; struct completion dmub_aux_transfer_done; + struct workqueue_struct *delayed_hpd_wq; /** * @brightness: @@ -542,6 +629,8 @@ struct dm_crtc_state { bool dsc_force_changed; bool vrr_supported; + + bool force_dpms_off; struct mod_freesync_config freesync_config; struct dc_info_packet vrr_infopacket; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 87daa78a32b8..f3ada9b6be5a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; + struct dc *dc = (struct dc *)link->dc; struct dc_link_settings prefer_link_settings; char *wr_buf = NULL; const uint32_t wr_buf_size = 40; @@ -313,7 +314,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, prefer_link_settings.lane_count = param[0]; prefer_link_settings.link_rate = param[1]; - dp_retrain_link_dp_test(link, &prefer_link_settings, false); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); kfree(wr_buf); return size; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c5f1dc3b5961..5bfdc66b5867 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -448,6 +448,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link =
&hdcp_work[link_index].link; struct drm_connector_state *conn_state; + struct dc_sink *sink = NULL; + bool link_is_hdcp14 = false; if (config->dpms_off) { hdcp_remove_display(hdcp_work, link_index, aconnector); @@ -460,8 +462,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->index = aconnector->base.index; display->state = MOD_HDCP_DISPLAY_ACTIVE; - if (aconnector->dc_sink != NULL) - link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + if (aconnector->dc_sink) + sink = aconnector->dc_sink; + else if (aconnector->dc_em_sink) + sink = aconnector->dc_em_sink; + + if (sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; display->dig_fe = config->dig_fe; @@ -470,8 +477,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->stream_enc_idx = config->stream_enc_idx; link->link_enc_idx = config->link_enc_idx; link->phy_idx = config->phy_idx; - link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link, - aconnector->dc_sink->sink_signal) ? 1 : 0; + if (sink) + link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal); + link->hdcp_supported_informational = link_is_hdcp14; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.assr_enabled = config->assr_enabled; link->dp.mst_enabled = config->mst_enabled; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 6fee12c91ef5..1aa69dd8e02f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -40,6 +40,39 @@ #include "dm_helpers.h" +struct monitor_patch_info { + unsigned int manufacturer_id; + unsigned int product_id; + void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); + unsigned int patch_param; +}; +static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); + +static const struct monitor_patch_info monitor_patch_table[] = { +{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, +{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, +}; + +static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) +{ + if (edid_caps) + edid_caps->panel_patch.max_dsc_target_bpp_limit = param; +} + +static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) + if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) + && (edid_caps->product_id == monitor_patch_table[i].product_id)) { + monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); + ret++; + } + + return ret; +} + /* dm_helpers_parse_edid_caps * * Parse edid caps @@ -125,6 +158,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( kfree(sads); kfree(sadb); + amdgpu_dm_patch_edid_caps(edid_caps); + return result; } @@ -751,3 +786,17 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) &new_downspread.raw, sizeof(new_downspread)); } + +#if defined(CONFIG_DRM_AMD_DC_DCN) +void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) +{ + // FPGA programming for this clock in the diags framework needs + // to go through the dm layer, therefore leave a dummy interface here +} + + +void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) +{ + /* TODO: add periodic detection 
implementation */ +} +#endif \ No newline at end of file
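A note on the amdgpu_dm_helpers.c hunk above: monitor_patch_table is a table-driven EDID quirk list. dm_helpers_parse_edid_caps() now finishes by calling amdgpu_dm_patch_edid_caps(), which compares the (manufacturer_id, product_id) pair parsed from the EDID against each table entry and, on a match, applies that entry's patch function with its parameter; both entries here cap panel_patch.max_dsc_target_bpp_limit at 15. The amdgpu_dm_mst_types.c hunks that follow feed this limit into the DSC configuration in place of the previous hard-coded 0. Below is a minimal standalone sketch of the same lookup pattern; the caps struct and the IDs are hypothetical stand-ins for dc_edid_caps and the real panel IDs.

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for the few dc_edid_caps fields the quirk table touches. */
struct edid_caps {
	unsigned int manufacturer_id;
	unsigned int product_id;
	unsigned int max_dsc_target_bpp_limit;
};

struct monitor_patch_info {
	unsigned int manufacturer_id;
	unsigned int product_id;
	void (*patch_func)(struct edid_caps *caps, unsigned int param);
	unsigned int patch_param;
};

static void set_max_dsc_bpp_limit(struct edid_caps *caps, unsigned int param)
{
	if (caps)
		caps->max_dsc_target_bpp_limit = param;
}

/* Hypothetical IDs; the real table keys off IDs parsed from the EDID. */
static const struct monitor_patch_info patch_table[] = {
	{ 0x1234, 0x0001, set_max_dsc_bpp_limit, 15 },
	{ 0x1234, 0x0002, set_max_dsc_bpp_limit, 15 },
};

/* Returns how many patches were applied, like the kernel helper does. */
static int patch_edid_caps(struct edid_caps *caps)
{
	size_t i;
	int applied = 0;

	for (i = 0; i < ARRAY_SIZE(patch_table); i++)
		if (caps->manufacturer_id == patch_table[i].manufacturer_id &&
		    caps->product_id == patch_table[i].product_id) {
			patch_table[i].patch_func(caps, patch_table[i].patch_param);
			applied++;
		}

	return applied;
}

int main(void)
{
	struct edid_caps caps = { 0x1234, 0x0002, 0 };

	printf("patches applied: %d, bpp limit: %u\n",
	       patch_edid_caps(&caps), caps.max_dsc_target_bpp_limit);
	return 0;
}

Compiled as an ordinary userspace program this prints "patches applied: 1, bpp limit: 15"; the kernel version differs only in operating on dc_edid_caps from the EDID parse path.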
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 7af0d58c231b..1a99fcc27078 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -542,7 +542,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p params[i].sink->ctx->dc->res_pool->dscs[0], &params[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, - 0, + params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, 0, params[i].timing, &params[i].timing->dsc_cfg)) { @@ -574,7 +574,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) param.sink->ctx->dc->res_pool->dscs[0], &param.sink->dsc_caps.dsc_dec_caps, param.sink->ctx->dc->debug.dsc_min_slice_height_override, - 0, + param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, (int) kbps, param.timing, &dsc_config); return dsc_config.bits_per_pixel; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 6dbde74c1e06..cdb5c027411a 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1604,6 +1604,16 @@ static enum bp_result bios_parser_get_encoder_cap_info( ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0; info->HDMI_6GB_EN = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + info->IS_DP2_CAPABLE = (record->encodercaps & + ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0; + info->DP_UHBR10_EN = (record->encodercaps & + ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0; + info->DP_UHBR13_5_EN = (record->encodercaps & + ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0; + info->DP_UHBR20_EN = (record->encodercaps & + ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0; +#endif info->DP_IS_USB_C = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 
1 : 0; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index f1f672a997d7..6e333b4af7d6 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -340,6 +340,13 @@ static enum bp_result transmitter_control_v1_7( const struct command_table_helper *cmd = bp->cmd_helper; struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0}; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0; + + if (dc_is_dp_signal(cntl->signal)) + hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0; +#endif + dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter); dig_v1_7.action = (uint8_t)cntl->action; @@ -353,6 +360,9 @@ static enum bp_result transmitter_control_v1_7( dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel); dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id; +#if defined(CONFIG_DRM_AMD_DC_DCN) + dig_v1_7.HPO_instance = hpo_instance; +#endif dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10; if (cntl->action == TRANSMITTER_CONTROL_ENABLE || diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 0e18df1283b6..6b248cd2a461 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -459,9 +459,9 @@ static void dcn_bw_calc_rq_dlg_ttu( struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs; struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs; struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs; - struct _vcs_dpi_display_rq_params_st rq_param = {0}; - struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0}; - struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } }; + struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param; + struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param; + struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input; float total_active_bw = 0; float total_prefetch_bw = 0; int total_flip_bytes = 0; @@ -470,45 +470,48 @@ static void dcn_bw_calc_rq_dlg_ttu( memset(dlg_regs, 0, sizeof(*dlg_regs)); memset(ttu_regs, 0, sizeof(*ttu_regs)); memset(rq_regs, 0, sizeof(*rq_regs)); + memset(rq_param, 0, sizeof(*rq_param)); + memset(dlg_sys_param, 0, sizeof(*dlg_sys_param)); + memset(input, 0, sizeof(*input)); for (i = 0; i < number_of_planes; i++) { total_active_bw += v->read_bandwidth[i]; total_prefetch_bw += v->prefetch_bandwidth[i]; total_flip_bytes += v->total_immediate_flip_bytes[i]; } - dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw); - if (dlg_sys_param.total_flip_bw < 0.0) - dlg_sys_param.total_flip_bw = 0; - - dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark; - dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark; - dlg_sys_param.t_urg_wm_us = v->urgent_watermark; - dlg_sys_param.t_extra_us = v->urgent_extra_latency; - dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep; - dlg_sys_param.total_flip_bytes = total_flip_bytes; - - pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe); - input.clks_cfg.dcfclk_mhz = v->dcfclk; - input.clks_cfg.dispclk_mhz = v->dispclk; - input.clks_cfg.dppclk_mhz = v->dppclk; - input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - 
input.clks_cfg.socclk_mhz = v->socclk; - input.clks_cfg.voltage = v->voltage_level; + dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw); + if (dlg_sys_param->total_flip_bw < 0.0) + dlg_sys_param->total_flip_bw = 0; + + dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark; + dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark; + dlg_sys_param->t_urg_wm_us = v->urgent_watermark; + dlg_sys_param->t_extra_us = v->urgent_extra_latency; + dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep; + dlg_sys_param->total_flip_bytes = total_flip_bytes; + + pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe); + input->clks_cfg.dcfclk_mhz = v->dcfclk; + input->clks_cfg.dispclk_mhz = v->dispclk; + input->clks_cfg.dppclk_mhz = v->dppclk; + input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + input->clks_cfg.socclk_mhz = v->socclk; + input->clks_cfg.voltage = v->voltage_level; // dc->dml.logger = pool->base.logger; - input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444; - input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp; + input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444; + input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp; //input[in_idx].dout.output_standard; /*todo: soc->sr_enter_plus_exit_time??*/ - dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; + dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; - dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src); + dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src); dml1_extract_rq_regs(dml, rq_regs, rq_param); dml1_rq_dlg_get_dlg_params( dml, dlg_regs, ttu_regs, - rq_param.dlg, + &rq_param->dlg, dlg_sys_param, input, true, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 0d01aa9f15a6..315466f5aade 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -38,6 +38,8 @@ #include "clk/clk_11_0_0_offset.h" #include "clk/clk_11_0_0_sh_mask.h" +#include "irq/dcn20/irq_service_dcn20.h" + #undef FN #define FN(reg_name, field_name) \ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name @@ -221,6 +223,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool p_state_change_support; int total_plane_count; + int irq_src; + uint32_t hpd_state; if (dc->work_arounds.skip_clock_update) return; @@ -238,7 +242,13 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->res_pool->pp_smu) pp_smu = &dc->res_pool->pp_smu->nv_funcs; - if (display_count == 0) + for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) { + hpd_state = dal_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src); + if (hpd_state) + break; + } + + if (display_count == 0 && !hpd_state) enter_display_off = true; if (enter_display_off == safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 6185f9475fa2..3fabf32a0558 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -42,6 +42,7 @@ #include "clk/clk_10_0_2_sh_mask.h" #include "renoir_ip_offset.h" +#include 
"irq/dcn21/irq_service_dcn21.h" /* Constants */ @@ -66,11 +67,9 @@ int rn_get_active_display_cnt_wa( for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; - /* Extend the WA to DP for Linux*/ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || - stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK || - stream->signal == SIGNAL_TYPE_DISPLAY_PORT) + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) tmds_present = true; } @@ -131,9 +130,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; int display_count; + int irq_src; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; + uint32_t hpd_state; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; @@ -149,8 +150,15 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { display_count = rn_get_active_display_cnt_wa(dc, context); + + for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) { + hpd_state = dal_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src); + if (hpd_state) + break; + } + /* if we can go lower, go lower */ - if (display_count == 0) { + if (display_count == 0 && !hpd_state) { rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 7046da14bb2a..3eee32faa208 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -582,8 +582,8 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 5.32, - .sr_enter_plus_exit_time_us = 6.38, + .sr_exit_time_us = 7.95, + .sr_enter_plus_exit_time_us = 9, .valid = true, }, { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 4a4894e9d9c9..d7bf9283dc90 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -87,7 +87,7 @@ int dcn31_get_active_display_cnt_wa( const struct dc_link *link = dc->links[i]; /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ - if (link->link_enc->funcs->is_dig_enabled && + if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } @@ -142,6 +142,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { dcn31_smu_set_Z9_support(clk_mgr, true); + dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } @@ -166,6 +167,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { dcn31_smu_set_Z9_support(clk_mgr, false); + dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false); clk_mgr_base->clks.zstate_support = 
new_clocks->zstate_support; } @@ -640,7 +642,7 @@ void dcn31_clk_mgr_construct( sizeof(struct dcn31_watermarks), &clk_mgr->smu_wm_set.mc_address.quad_part); - if (clk_mgr->smu_wm_set.wm_set == 0) { + if (!clk_mgr->smu_wm_set.wm_set) { clk_mgr->smu_wm_set.wm_set = &dummy_wms; clk_mgr->smu_wm_set.mc_address.quad_part = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c798c65d4276..331a7517176b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -71,6 +71,8 @@ #include "dmub/dmub_srv.h" +#include "dcn30/dcn30_vpg.h" + #include "i2caux_interface.h" #include "dce/dmub_hw_lock_mgr.h" @@ -255,6 +257,24 @@ static bool create_links( goto failed_alloc; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && + dc->caps.dp_hpo && + link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) { + /* FPGA case - Allocate HPO DP link encoder */ + if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) { + link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i]; + + if (link->hpo_dp_link_enc == NULL) { + BREAK_TO_DEBUGGER(); + goto failed_alloc; + } + link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source; + link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter; + } + } +#endif + link->link_status.dpcd_caps = &link->dpcd_caps; enc_init.ctx = dc->ctx; @@ -1544,7 +1564,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) } #if defined(CONFIG_DRM_AMD_DC_DCN) -void dc_z10_restore(struct dc *dc) +void dc_z10_restore(const struct dc *dc) { if (dc->hwss.z10_restore) dc->hwss.z10_restore(dc); @@ -1783,6 +1803,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) post_surface_trace(dc); + if (dc->ctx->dce_version >= DCE_VERSION_MAX) + TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); + else + TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); + if (is_flip_pending_in_pipes(dc, context)) return; @@ -1990,7 +2015,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa } if (u->plane_info->dcc.enable != u->surface->dcc.enable - || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks + || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { /* During DCC on/off, stutter period is calculated before * DCC has fully transitioned. This results in incorrect @@ -2532,6 +2557,9 @@ static void commit_planes_do_stream_update(struct dc *dc, enum surface_update_type update_type, struct dc_state *context) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct vpg *vpg; +#endif int j; // Stream updates @@ -2552,6 +2580,11 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->vrr_infopacket || stream_update->vsc_infopacket || stream_update->vsp_infopacket) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + vpg = pipe_ctx->stream_res.stream_enc->vpg; + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); +#endif resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); } @@ -2968,6 +3001,9 @@ void dc_commit_updates_for_stream(struct dc *dc, if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) new_pipe->plane_state->force_full_update = true; } + } else if (update_type == UPDATE_TYPE_FAST) { + /* Previous frame finished and HW is ready for optimization. 
*/ + dc_post_update_surfaces_to_stream(dc); } @@ -3024,15 +3060,6 @@ void dc_commit_updates_for_stream(struct dc *dc, pipe_ctx->plane_state->force_full_update = false; } } - /*let's use current_state to update watermark etc*/ - if (update_type >= UPDATE_TYPE_FULL) { - dc_post_update_surfaces_to_stream(dc); - - if (dc_ctx->dce_version >= DCE_VERSION_MAX) - TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); - else - TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); - } return; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 1e44b13c1c7d..2bd38d19a447 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -51,6 +51,8 @@ #include "inc/link_enc_cfg.h" #include "inc/link_dpcd.h" +#include "dc/dcn30/dcn30_vpg.h" + #define DC_LOGGER_INIT(logger) #define LINK_INFO(...) \ @@ -64,6 +66,31 @@ /******************************************************************************* * Private functions ******************************************************************************/ +#if defined(CONFIG_DRM_AMD_DC_DCN) +static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link) +{ + struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder( + link->dc->res_pool); + + if (!link->hpo_dp_link_enc && enc) { + link->hpo_dp_link_enc = enc; + link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter; + link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source; + } + + return (link->hpo_dp_link_enc != NULL); +} + +static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link) +{ + if (link->hpo_dp_link_enc) { + link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN; + link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN; + link->hpo_dp_link_enc = NULL; + } +} +#endif + static void dc_link_destruct(struct dc_link *link) { int i; @@ -91,6 +118,12 @@ static void dc_link_destruct(struct dc_link *link) link->link_enc->funcs->destroy(&link->link_enc); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->hpo_dp_link_enc) { + remove_dp_hpo_link_encoder_from_link(link); + } +#endif + if (link->local_sink) dc_sink_release(link->local_sink); @@ -928,6 +961,11 @@ static bool dc_link_detect_helper(struct dc_link *link, return false; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING) + add_dp_hpo_link_encoder_to_link(link); +#endif + if (link->type == dc_connection_mst_branch) { LINK_INFO("link=%d, mst branch is now Connected\n", link->link_index); @@ -1173,6 +1211,11 @@ static bool dc_link_detect_helper(struct dc_link *link, sizeof(link->mst_stream_alloc_table.stream_allocations)); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) + reset_dp_hpo_stream_encoders_for_link(link); +#endif + link->type = dc_connection_none; sink_caps.signal = SIGNAL_TYPE_NONE; /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk @@ -1209,6 +1252,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } } +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc_z10_restore(dc); +#endif + /* get out of low power state */ if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); @@ -1549,6 +1596,9 @@ static bool dc_link_construct(struct dc_link *link, } DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); +#if 
defined(CONFIG_DRM_AMD_DC_DCN) + DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); +#endif /* Update link encoder tracking variables. These are used for the dynamic * assignment of link encoders to streams. @@ -1741,17 +1791,36 @@ static enum dc_status enable_link_dp(struct dc_state *state, /* get link settings for video mode timing */ decide_link_settings(stream, &link_settings); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + dp_enable_mst_on_sink(link, true); + } +#endif + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) { + /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ + } else { + pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = + link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; + if (state->clk_mgr && !apply_seamless_boot_optimization) + state->clk_mgr->funcs->update_clocks(state->clk_mgr, + state, false); + } +#else pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = - link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; + link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, - state, false); + state, false); +#endif // during mode switch we do DP_SET_POWER off then on, and OUI is lost dpcd_set_source_specific_data(link); @@ -1780,7 +1849,12 @@ static enum dc_status enable_link_dp(struct dc_state *state, else fec_enable = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) + dp_set_fec_enable(link, fec_enable); +#else dp_set_fec_enable(link, fec_enable); +#endif // during mode set we do DP_SET_POWER off then on, aux writes are lost if (link->dpcd_sink_ext_caps.bits.oled == 1 || @@ -2284,6 +2358,9 @@ static void disable_link(struct dc_link *link, enum signal_type signal) if (dc_is_dp_signal(signal)) { /* SST DP, eDP */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_link_settings link_settings = link->cur_link_settings; +#endif if (dc_is_dp_sst_signal(signal)) dp_disable_link_phy(link, signal); else @@ -2291,8 +2368,15 @@ static void disable_link(struct dc_link *link, enum signal_type signal) if (dc_is_dp_sst_signal(signal) || link->mst_stream_alloc_table.stream_count == 0) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { + dp_set_fec_enable(link, false); + dp_set_fec_ready(link, false); + } +#else dp_set_fec_enable(link, false); dp_set_fec_ready(link, false); +#endif } } else { if (signal != SIGNAL_TYPE_VIRTUAL) @@ -2475,9 +2559,14 @@ static bool dp_active_dongle_validate_timing( break; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && + dongle_caps->extendedCapValid == true) { +#else if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; +#endif /* Check Pixel Encoding */ switch (timing->pixel_encoding) { @@ -2520,6 +2609,89 @@ static bool dp_active_dongle_validate_timing( if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; +#if 
defined(CONFIG_DRM_AMD_DC_DCN) + } + + if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && + dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 && + dongle_caps->dfp_cap_ext.supported) { + + if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000)) + return false; + + if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable) + return false; + + if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable) + return false; + + if (timing->pixel_encoding == PIXEL_ENCODING_RGB) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_666 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc) + return false; + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc) + return false; + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc) + return false; + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + 
!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc) + return false; + } + } +#endif + return true; } @@ -3024,6 +3196,107 @@ static void update_mst_stream_alloc_table( link->mst_stream_alloc_table.stream_allocations[i] = work_table[i]; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +/* + * Payload allocation/deallocation for SST introduced in DP2.0 + */ +enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + DC_LOGGER_INIT(link->ctx->logger); + + /* slot X.Y for SST payload deallocate */ + if (!allocate) { + avg_time_slots_per_mtp = dc_fixpt_from_int(0); + + DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream" + "X: %d " + "Y: %d", + dc_fixpt_floor( + avg_time_slots_per_mtp), + dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + dc_fixpt_floor( + avg_time_slots_per_mtp)), + 26))); + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + } + + /* calculate VC payload and update branch with new payload allocation table*/ + if (!dpcd_write_128b_132b_sst_payload_allocation_table( + stream, + link, + &proposed_table, + allocate)) { + DC_LOG_ERROR("SST Update Payload: Failed to update " + "allocation table for " + "pipe idx: %d\n", + pipe_ctx->pipe_idx); + } + + proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder; + + ASSERT(proposed_table.stream_count == 1); + + //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id + DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p " + "vcp_id: %d " + "slot_count: %d\n", + (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, + proposed_table.stream_allocations[0].vcp_id, + proposed_table.stream_allocations[0].slot_count); + + /* program DP source TX for payload */ + hpo_dp_link_encoder->funcs->update_stream_allocation_table( + hpo_dp_link_encoder, + &proposed_table); + + /* poll for ACT handled */ + if (!dpcd_poll_for_allocation_change_trigger(link)) { + // Failures will result in blackscreen and errors logged + BREAK_TO_DEBUGGER(); + } + + /* slot X.Y for SST payload allocate */ + if (allocate) { + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); + + DC_LOG_DP2("SST Update Payload: " + "slot.X: %d " + "slot.Y: %d", + dc_fixpt_floor( + avg_time_slots_per_mtp), + dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + dc_fixpt_floor( + avg_time_slots_per_mtp)), + 26))); + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + } + + /* Always return DC_OK. 
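+ * If part of sequence fails, log failure(s) and show blackscreen + */ + return DC_OK; +} +#endif

A note on the "slot X.Y" logging in dc_link_update_sst_payload() above: avg_time_slots_per_mtp is a fixed31_32 value; X is its integer part (dc_fixpt_floor()) and Y is the fractional part scaled to a 26-bit field (shifted left by 26, then rounded up), matching the X.Y format that set_throttled_vcp_size() programs into the HPO DP link encoder. A standalone sketch of that split, using a plain double in place of fixed31_32:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Split a non-negative slot count into integer part X and a 26-bit
 * fraction Y, mirroring the dc_fixpt_floor()/dc_fixpt_shl() arithmetic
 * in the DC_LOG_DP2 calls above.
 */
static void slots_to_x_y(double slots, uint32_t *x, uint32_t *y)
{
	double frac = slots - floor(slots);

	*x = (uint32_t)floor(slots);
	*y = (uint32_t)ceil(frac * (double)(1u << 26));
}

int main(void)
{
	uint32_t x, y;

	slots_to_x_y(3.25, &x, &y);
	/* Prints 3 and 16777216 (0.25 * 2^26). */
	printf("slot X.Y = %u + %u/2^26\n", x, y);
	return 0;
}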
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm @@ -3198,6 +3471,10 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) { struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct link_encoder *link_enc = NULL; +#endif + if (cp_psp && cp_psp->funcs.update_stream_config) { struct cp_psp_stream_config config = {0}; enum dp_panel_mode panel_mode = @@ -3209,8 +3486,23 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; #if defined(CONFIG_DRM_AMD_DC_DCN) config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; - config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) { + link_enc = pipe_ctx->stream->link->link_enc; + config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_stream( + pipe_ctx->stream->ctx->dc, + pipe_ctx->stream); + config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */ + } + ASSERT(link_enc); + if (link_enc) + config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (is_dp_128b_132b_signal(pipe_ctx)) { + config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; + config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst; + config.dp2_enabled = 1; + } #endif config.dpms_off = dpms_off; config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; @@ -3222,15 +3514,100 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) } #endif +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + uint8_t req_slot_count = 0; + uint8_t vc_id = 1; /// VC ID always 1 for SST + + struct dc_link_settings link_settings = {0}; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + decide_link_settings(stream, &link_settings); + stream->link->cur_link_settings = link_settings; + + /* Enable clock, Configure lane count, and Enable Link Encoder*/ + enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings); + +#ifdef DIAGS_BUILD + /* Workaround for FPGA HPO capture DP link data: + * HPO capture will set link to active mode + * This workaround is required to get a capture from start of frame + */ + if (!dc->debug.fpga_hpo_capture_en) { + struct encoder_set_dp_phy_pattern_param params = {0}; + params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE; + + /* Set link active */ + stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern( + stream->link->hpo_dp_link_enc, + &params); + } +#endif + + /* Enable DP_STREAM_ENC */ + dc->hwss.enable_stream(pipe_ctx); + + /* Set DSC PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + dp_set_dsc_pps_sdp(pipe_ctx, true, true); + 
} + + /* Allocate Payload */ + if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { + // MST case + uint8_t i; + + proposed_table.stream_count = state->stream_count; + for (i = 0; i < state->stream_count; i++) { + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_allocations[i].slot_count = req_slot_count; + proposed_table.stream_allocations[i].vcp_id = i+1; + /* NOTE: This makes assumption that pipe_ctx index is same as stream index */ + proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; + } + } else { + // SST case + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_count = 1; /// Always 1 stream for SST + proposed_table.stream_allocations[0].slot_count = req_slot_count; + proposed_table.stream_allocations[0].vcp_id = vc_id; + proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + } + + stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table( + stream->link->hpo_dp_link_enc, + &proposed_table); + + stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size( + stream->link->hpo_dp_link_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst, + avg_time_slots_per_mtp); + + + + dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); +} +#endif + void core_link_enable_stream( struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->sink->link; enum dc_status status; + struct link_encoder *link_enc; #if defined(CONFIG_DRM_AMD_DC_DCN) enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; #endif DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -3238,23 +3615,57 @@ void core_link_enable_stream( dc_is_virtual_signal(pipe_ctx->stream->signal)) return; + if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY) + link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream); + else + link_enc = stream->link->link_enc; + ASSERT(link_enc); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!dc_is_virtual_signal(pipe_ctx->stream->signal) + && !is_dp_128b_132b_signal(pipe_ctx)) { +#else if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { - stream->link->link_enc->funcs->setup( - stream->link->link_enc, - pipe_ctx->stream->signal); +#endif + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst, stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); } - if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute( + pipe_ctx->stream_res.hpo_dp_stream_enc, + &stream->timing, + stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, + stream->timing.flags.DSC, + false); + otg_out_dest = OUT_MUX_HPO_DP; + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( + pipe_ctx->stream_res.stream_enc, + &stream->timing, + 
stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, + stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + } +#else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( pipe_ctx->stream_res.stream_enc, &stream->timing, stream->output_color_space, stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); +#endif + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute( @@ -3288,9 +3699,18 @@ void core_link_enable_stream( pipe_ctx->stream->apply_edp_fast_boot_optimization = false; +#if defined(CONFIG_DRM_AMD_DC_DCN) + // Enable VPG before building infoframe + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); +#endif + resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + /* Do not touch link on seamless boot optimization. */ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; @@ -3365,10 +3785,16 @@ void core_link_enable_stream( * as a workaround for the incorrect value being applied * from transmitter control. */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || + is_dp_128b_132b_signal(pipe_ctx))) +#else if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) - stream->link->link_enc->funcs->setup( - stream->link->link_enc, - pipe_ctx->stream->signal); +#endif + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); dc->hwss.enable_stream(pipe_ctx); @@ -3377,12 +3803,17 @@ void core_link_enable_stream( if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { dp_set_dsc_on_rx(pipe_ctx, true); - dp_set_dsc_pps_sdp(pipe_ctx, true); + dp_set_dsc_pps_sdp(pipe_ctx, true, true); } } if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + is_dp_128b_132b_signal(pipe_ctx)) + dc_link_update_sst_payload(pipe_ctx, true); +#endif dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); @@ -3399,6 +3830,11 @@ void core_link_enable_stream( dc->hwss.enable_audio_stream(pipe_ctx); } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); + } +#endif if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); @@ -3415,6 +3851,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; +#endif if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) @@ -3434,6 +3873,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (pipe_ctx->stream->signal == 
SIGNAL_TYPE_DISPLAY_PORT && + is_dp_128b_132b_signal(pipe_ctx)) + dc_link_update_sst_payload(pipe_ctx, false); +#endif if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { struct ext_hdmi_settings settings = {0}; @@ -3460,14 +3904,44 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) } } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + !is_dp_128b_132b_signal(pipe_ctx)) { + + /* In DP1.x SST mode, our encoder will go to TPS1 + * when link is on but stream is off. + * Disabling link before stream will avoid exposing TPS1 pattern + * during the disable sequence as it will confuse some receivers + * state machine. + * In DP2 or MST mode, our encoder will stay video active + */ + disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + dc->hwss.disable_stream(pipe_ctx); + } else { + dc->hwss.disable_stream(pipe_ctx); + disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + } +#else disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); dc->hwss.disable_stream(pipe_ctx); +#endif if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); + } +#endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (vpg && vpg->funcs->vpg_powerdown) + vpg->funcs->vpg_powerdown(vpg); +#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -3600,6 +4074,13 @@ void dc_link_set_preferred_training_settings(struct dc *dc, if (link_setting != NULL) { link->preferred_link_setting = *link_setting; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_setting) == + DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) { + if (!add_dp_hpo_link_encoder_to_link(link)) + memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting)); + } +#endif } else { link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; @@ -3641,6 +4122,38 @@ uint32_t dc_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_setting) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint32_t total_data_bw_efficiency_x10000 = 0; + uint32_t link_rate_per_lane_kbps = 0; + + switch (dp_get_link_encoding_format(link_setting)) { + case DP_8b_10b_ENCODING: + /* For 8b/10b encoding: + * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. + * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. + */ + link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; + if (dc_link_should_enable_fec(link)) { + total_data_bw_efficiency_x10000 /= 100; + total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; + } + break; + case DP_128b_132b_ENCODING: + /* For 128b/132b encoding: + * link rate is defined in the unit of 10mbps per lane. + * total data bandwidth efficiency is always 96.71%. 
+ */ + link_rate_per_lane_kbps = link_setting->link_rate * 10000; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; + break; + default: + break; + } + + /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ + return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; +#else uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */ @@ -3671,9 +4184,9 @@ uint32_t dc_link_bandwidth_kbps( long long fec_link_bw_kbps = link_bw_kbps * 970LL; link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL)); } - return link_bw_kbps; +#endif } const struct dc_link_settings *dc_link_get_link_cap( @@ -3700,14 +4213,14 @@ bool dc_link_is_fec_supported(const struct dc_link *link) */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->dc, link->dc->current_state); + link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); } else link_enc = link->link_enc; ASSERT(link_enc); - return (dc_is_dp_signal(link->connector_signal) && + return (dc_is_dp_signal(link->connector_signal) && link_enc && link_enc->features.fec_supported && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); @@ -3721,8 +4234,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link) if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && link->local_sink && link->local_sink->edid_caps.panel_patch.disable_fec) || - (link->connector_signal == SIGNAL_TYPE_EDP && - link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP + (link->connector_signal == SIGNAL_TYPE_EDP + )) is_fec_disable = true; if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f6dbc5a74757..6421c896f2a1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -61,6 +61,43 @@ enum { POST_LT_ADJ_REQ_TIMEOUT = 200 }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct dp_lt_fallback_entry { + enum dc_lane_count lane_count; + enum dc_link_rate link_rate; +}; + +static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { + /* This link training fallback array is ordered by + * link bandwidth from highest to lowest. + * The DP spec makes it a normative policy to always + * choose the next highest link bandwidth during + * link training fallback. 
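+ */ + {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR20}, + {LANE_COUNT_TWO, LINK_RATE_UHBR10}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, + {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_HIGH2}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_HIGH3}, + {LANE_COUNT_FOUR, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH2}, + {LANE_COUNT_TWO, LINK_RATE_HIGH}, + {LANE_COUNT_TWO, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_LOW}, +}; +#endif +

The ordering of dp_lt_fallbacks above follows from the effective-bandwidth formula that dc_link_bandwidth_kbps() now implements: per-lane rate x lane count x encoding efficiency (80% for 8b/10b, reduced by a further 3% when FEC is enabled; 96.71% for 128b/132b). A standalone check of a few neighbouring table entries under those efficiency figures (rates in Gbps, FEC overhead ignored here for brevity):

#include <stdio.h>

/* Effective bandwidth in Gbps: per-lane rate x lanes x efficiency,
 * using the efficiency figures from the dc_link_bandwidth_kbps()
 * comments (0.8 for 8b/10b without FEC, 0.9671 for 128b/132b).
 */
static double effective_gbps(double lane_rate_gbps, int lanes, int is_128b_132b)
{
	return lane_rate_gbps * lanes * (is_128b_132b ? 0.9671 : 0.8);
}

int main(void)
{
	/* Neighbouring fallback entries, highest bandwidth first. */
	printf("4 x UHBR10  : %6.2f Gbps\n", effective_gbps(10.0, 4, 1)); /* 38.68 */
	printf("2 x UHBR13.5: %6.2f Gbps\n", effective_gbps(13.5, 2, 1)); /* 26.11 */
	printf("4 x HBR3    : %6.2f Gbps\n", effective_gbps(8.1, 4, 0));  /* 25.92 */
	printf("1 x UHBR20  : %6.2f Gbps\n", effective_gbps(20.0, 1, 1)); /* 19.34 */
	printf("2 x UHBR10  : %6.2f Gbps\n", effective_gbps(10.0, 2, 1)); /* 19.34 */
	return 0;
}

The 1 x UHBR20 and 2 x UHBR10 entries tie, so their relative order in the table is a policy choice rather than a bandwidth consequence.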
static bool decide_fallback_link_setting( struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, @@ -74,15 +111,27 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, { union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 100; - +#if defined(CONFIG_DRM_AMD_DC_DCN) memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } +#else core_link_read_dpcd( link, DP_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; +#endif return wait_in_micro_secs; } @@ -90,6 +139,36 @@ static uint32_t get_eq_training_aux_rd_interval( struct dc_link *link, const struct dc_link_settings *link_settings) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + union training_aux_rd_interval training_rd_interval; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + core_link_read_dpcd( + link, + DP_128b_132b_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } + + switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { + case 0: return 400; + case 1: return 4000; + case 2: return 8000; + case 3: return 12000; + case 4: return 16000; + case 5: return 32000; + case 6: return 64000; + default: return 400; + } +#else union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 400; @@ -109,13 +188,21 @@ static uint32_t get_eq_training_aux_rd_interval( } return wait_in_micro_secs; +#endif } void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (wait_in_micro_secs > 16000) + msleep(wait_in_micro_secs/1000); + else + udelay(wait_in_micro_secs); +#else udelay(wait_in_micro_secs); +#endif 
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -143,6 +230,17 @@ enum dpcd_training_patterns case DP_TRAINING_PATTERN_SEQUENCE_4: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_TPS1: + dpcd_tr_pattern = DPCD_128b_132b_TPS1; + break; + case DP_128b_132b_TPS2: + dpcd_tr_pattern = DPCD_128b_132b_TPS2; + break; + case DP_128b_132b_TPS2_CDS: + dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; + break; +#endif case DP_TRAINING_PATTERN_VIDEOIDLE: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; break; @@ -181,13 +279,57 @@ static void dpcd_set_training_pattern( static enum dc_dp_training_pattern decide_cr_training_pattern( const struct dc_link_settings *link_settings) { - return DP_TRAINING_PATTERN_SEQUENCE_1; + switch (dp_get_link_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + default: + return DP_TRAINING_PATTERN_SEQUENCE_1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_ENCODING: + return DP_128b_132b_TPS1; +#endif + } } static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, const struct dc_link_settings *link_settings) { struct link_encoder *link_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct encoder_feature_support *enc_caps; + struct dpcd_caps *rx_caps = &link->dpcd_caps; + enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + + /* Access link encoder capability based on whether it is statically + * or dynamically assigned to a link. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + else + link_enc = link->link_enc; + ASSERT(link_enc); + enc_caps = &link_enc->features; + + switch (dp_get_link_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + if (enc_caps->flags.bits.IS_TPS4_CAPABLE && + rx_caps->max_down_spread.bits.TPS4_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && + rx_caps->max_ln_count.bits.TPS3_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_3; + else + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + case DP_128b_132b_ENCODING: + pattern = DP_128b_132b_TPS2; + break; + default: + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + } + return pattern; +#else enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; struct encoder_feature_support *features; struct dpcd_caps *dpcd_caps = &link->dpcd_caps; @@ -197,7 +339,7 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -218,8 +360,39 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li return DP_TRAINING_PATTERN_SEQUENCE_3; return DP_TRAINING_PATTERN_SEQUENCE_2; +#endif } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) +{ + uint8_t link_rate = 0; + enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings); + + if (encoding == DP_128b_132b_ENCODING) + switch (link_settings->link_rate) { + case LINK_RATE_UHBR10: + link_rate = 0x1; + break; + case LINK_RATE_UHBR20: + link_rate = 0x2; + break; + case LINK_RATE_UHBR13_5: + link_rate = 0x4; + break; + 
+	default:
+		link_rate = 0;
+		break;
+	}
+	else if (encoding == DP_8b_10b_ENCODING)
+		link_rate = (uint8_t) link_settings->link_rate;
+	else
+		link_rate = 0;
+
+	return link_rate;
+}
+#endif
+
 enum dc_status dpcd_set_link_settings(
 	struct dc_link *link,
 	const struct link_training_settings *lt_settings)
@@ -269,7 +442,11 @@ enum dc_status dpcd_set_link_settings(
 		status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
 				&lt_settings->link_settings.link_rate_set, 1);
 	} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		rate = get_dpcd_link_rate(&lt_settings->link_settings);
+#else
 		rate = (uint8_t) (lt_settings->link_settings.link_rate);
+#endif
 		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
 	}
@@ -311,6 +488,10 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
 		disable_scrabled_data_symbols = 1;
 		break;
 	case DP_TRAINING_PATTERN_SEQUENCE_4:
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	case DP_128b_132b_TPS1:
+	case DP_128b_132b_TPS2:
+#endif
 		disable_scrabled_data_symbols = 0;
 		break;
 	default:
@@ -378,6 +559,26 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 	for (lane = 0; lane <
 		(uint32_t)(lt_settings->link_settings.lane_count); lane++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_128b_132b_ENCODING) {
+			dpcd_lane[lane].tx_ffe.PRESET_VALUE =
+					lt_settings->lane_settings[lane].FFE_PRESET.settings.level;
+		} else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_8b_10b_ENCODING) {
+			dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+					(uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
+			dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+					(uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
+
+			dpcd_lane[lane].bits.MAX_SWING_REACHED =
+					(lt_settings->lane_settings[lane].VOLTAGE_SWING ==
+					VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+			dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+					(lt_settings->lane_settings[lane].PRE_EMPHASIS ==
+					PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+		}
+#else
 		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
 			(uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
 		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
@@ -389,6 +590,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
 			(lt_settings->lane_settings[lane].PRE_EMPHASIS ==
 			PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+#endif
 	}
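
/*
 * Sketch only: dpcd_set_link_settings() above writes one of two
 * encodings. For 8b/10b the DP_LINK_BW_SET code is the dc link-rate
 * value itself (units of 0.27 Gbps per lane, e.g. HBR2 = 0x14), while
 * for 128b/132b each UHBR rate gets its own code. The numeric UHBR
 * enum values (1000/1350/2000) are an assumption about dc's
 * convention; only the DPCD codes 0x1/0x2/0x4 come from
 * get_dpcd_link_rate() above.
 */
#include <stdint.h>
#include <stdbool.h>

static uint8_t dpcd_bw_code(uint32_t dc_link_rate, bool is_128b_132b)
{
	if (is_128b_132b) {
		switch (dc_link_rate) {
		case 1000: return 0x1;	/* UHBR10 */
		case 2000: return 0x2;	/* UHBR20 */
		case 1350: return 0x4;	/* UHBR13.5 */
		default:   return 0;
		}
	}
	return (uint8_t)dc_link_rate;	/* e.g. 0x14 * 0.27 Gbps = 5.4 Gbps */
}
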
 
 	/* concatenate everything into one buffer*/
@@ -402,6 +604,18 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 		size_in_bytes);
 
 	if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_128b_132b_ENCODING)
+			DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+					" 0x%X TX_FFE_PRESET_VALUE = %x\n",
+					__func__,
+					offset,
+					dpcd_base_lt_offset,
+					dpcd_lane[0].tx_ffe.PRESET_VALUE);
+		else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_8b_10b_ENCODING)
+#endif
 		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
 			" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
 			__func__,
@@ -412,6 +626,16 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 			dpcd_lane[0].bits.MAX_SWING_REACHED,
 			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
 	} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_128b_132b_ENCODING)
+			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+					__func__,
+					dpcd_base_lt_offset,
+					dpcd_lane[0].tx_ffe.PRESET_VALUE);
+		else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_8b_10b_ENCODING)
+#endif
 		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
 			__func__,
 			dpcd_base_lt_offset,
@@ -436,6 +660,15 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 			(uint8_t *)(dpcd_lane),
 			size_in_bytes);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	} else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+			DP_128b_132b_ENCODING) {
+		core_link_write_dpcd(
+				link,
+				dpcd_base_lt_offset,
+				dpcd_lt_buffer,
+				sizeof(dpcd_lt_buffer));
+#endif
 	} else
 		/* write it all in (1 + number-of-lanes)-byte burst*/
 		core_link_write_dpcd(
@@ -506,6 +739,13 @@ void dp_update_drive_settings(
 			dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2;
 		else
 			dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dest->ffe_preset == NULL)
+			dest->lane_settings[lane].FFE_PRESET = src.lane_settings[lane].FFE_PRESET;
+		else
+			dest->lane_settings[lane].FFE_PRESET = *dest->ffe_preset;
+#endif
 	}
 }
@@ -551,6 +791,10 @@ static void find_max_drive_settings(
 		lane_settings[0].PRE_EMPHASIS;
 	/*max_requested.postCursor2 =
 	 * link_training_setting->laneSettings[0].postCursor2;*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	max_requested.FFE_PRESET =
+		link_training_setting->lane_settings[0].FFE_PRESET;
+#endif
 
 	/* Determine what the maximum of the requested settings are*/
 	for (lane = 1; lane < link_training_setting->link_settings.lane_count;
@@ -576,6 +820,13 @@ static void find_max_drive_settings(
 		 *	link_training_setting->laneSettings[lane].postCursor2;
 		 * }
 		 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (link_training_setting->lane_settings[lane].FFE_PRESET.settings.level >
+				max_requested.FFE_PRESET.settings.level)
+			max_requested.FFE_PRESET.settings.level =
+					link_training_setting->
+					lane_settings[lane].FFE_PRESET.settings.level;
+#endif
 	}
 
 	/* make sure the requested settings are
@@ -589,6 +840,10 @@ static void find_max_drive_settings(
 	if (max_requested.postCursor2 > PostCursor2_MaxLevel)
 		max_requested.postCursor2 = PostCursor2_MaxLevel;
 	*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
+		max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+#endif
 
 	/* make sure the pre-emphasis matches the voltage swing*/
 	if
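
/*
 * Simplified sketch of find_max_drive_settings() above, which
 * continues right after this aside: with 128b/132b the per-lane TX
 * FFE preset requests collapse to a single value — the highest level
 * any lane asked for, clamped to the field maximum (assumed to be 15
 * here, a 4-bit field) — which is then applied to every lane.
 */
static unsigned int pick_ffe_preset(const unsigned int *requested, int lanes)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < lanes; i++)
		if (requested[i] > max)
			max = requested[i];

	return max > 15 ? 15 : max;	/* DP_FFE_PRESET_MAX_LEVEL, assumed 15 */
}
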
(max_requested.PRE_EMPHASIS > @@ -626,6 +881,10 @@ static void find_max_drive_settings( /*max_lt_setting->laneSettings[lane].postCursor2 = * max_requested.postCursor2; */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + max_lt_setting->lane_settings[lane].FFE_PRESET = + max_requested.FFE_PRESET; +#endif } } @@ -723,12 +982,28 @@ enum dc_status dp_get_lane_status_and_drive_settings( (uint32_t)(link_training_setting->link_settings.lane_count); lane++) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_128b_132b_ENCODING) { + request_settings.lane_settings[lane].FFE_PRESET.raw = + dpcd_lane_adjust[lane].tx_ffe.PRESET_VALUE; + } else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_8b_10b_ENCODING) { + request_settings.lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits. + VOLTAGE_SWING_LANE); + request_settings.lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits. + PRE_EMPHASIS_LANE); + } +#else request_settings.lane_settings[lane].VOLTAGE_SWING = (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits. VOLTAGE_SWING_LANE); request_settings.lane_settings[lane].PRE_EMPHASIS = (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits. PRE_EMPHASIS_LANE); +#endif } /*Note: for postcursor2, read adjusted @@ -767,6 +1042,26 @@ enum dc_status dpcd_set_lane_settings( (uint32_t)(link_training_setting-> link_settings.lane_count); lane++) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_128b_132b_ENCODING) { + dpcd_lane[lane].tx_ffe.PRESET_VALUE = + link_training_setting->lane_settings[lane].FFE_PRESET.settings.level; + } else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_8b_10b_ENCODING) { + dpcd_lane[lane].bits.VOLTAGE_SWING_SET = + (uint8_t)(link_training_setting->lane_settings[lane].VOLTAGE_SWING); + dpcd_lane[lane].bits.PRE_EMPHASIS_SET = + (uint8_t)(link_training_setting->lane_settings[lane].PRE_EMPHASIS); + + dpcd_lane[lane].bits.MAX_SWING_REACHED = + (link_training_setting->lane_settings[lane].VOLTAGE_SWING == + VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); + dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED = + (link_training_setting->lane_settings[lane].PRE_EMPHASIS == + PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); + } +#else dpcd_lane[lane].bits.VOLTAGE_SWING_SET = (uint8_t)(link_training_setting-> lane_settings[lane].VOLTAGE_SWING); @@ -781,6 +1076,7 @@ enum dc_status dpcd_set_lane_settings( (link_training_setting-> lane_settings[lane].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 
 				1 : 0);
+#endif
 	}
 
 	status = core_link_write_dpcd(link,
@@ -808,6 +1104,18 @@ enum dc_status dpcd_set_lane_settings(
 	 */
 	if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+				DP_128b_132b_ENCODING)
+			DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+					" 0x%X TX_FFE_PRESET_VALUE = %x\n",
+					__func__,
+					offset,
+					lane0_set_address,
+					dpcd_lane[0].tx_ffe.PRESET_VALUE);
+		else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+				DP_8b_10b_ENCODING)
+#endif
 		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
 				" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
 				__func__,
@@ -819,6 +1127,16 @@ enum dc_status dpcd_set_lane_settings(
 				dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
 
 	} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+				DP_128b_132b_ENCODING)
+			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+					__func__,
+					lane0_set_address,
+					dpcd_lane[0].tx_ffe.PRESET_VALUE);
+		else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+				DP_8b_10b_ENCODING)
+#endif
 		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
 				__func__,
 				lane0_set_address,
@@ -954,6 +1272,14 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval
 	case 0x04:
 		aux_rd_interval_us = 16000;
 		break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	case 0x05:
+		aux_rd_interval_us = 32000;
+		break;
+	case 0x06:
+		aux_rd_interval_us = 64000;
+		break;
+#endif
 	default:
 		break;
 	}
@@ -994,8 +1320,13 @@ static enum link_training_result perform_channel_equalization_sequence(
 
 	tr_pattern = lt_settings->pattern_for_eq;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#else
 	if (is_repeater(link, offset))
 		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#endif
 
 	dp_set_hw_training_pattern(link, tr_pattern, offset);
@@ -1147,9 +1478,28 @@ static enum link_training_result perform_clock_recovery_sequence(
 			return LINK_TRAINING_SUCCESS;
 
 		/* 6. max VS reached*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
+				DP_8b_10b_ENCODING) &&
+				dp_is_max_vs_reached(lt_settings))
+			break;
+#else
 		if (dp_is_max_vs_reached(lt_settings))
 			break;
+#endif
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+				lt_settings->lane_settings[0].FFE_PRESET.settings.level ==
+				req_settings.lane_settings[0].FFE_PRESET.settings.level)
+			retries_cr++;
+		else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+				lt_settings->lane_settings[0].VOLTAGE_SWING ==
+				req_settings.lane_settings[0].VOLTAGE_SWING)
+			retries_cr++;
+		else
+			retries_cr = 0;
+#else
 		/* 7. same lane settings*/
 		/* Note: settings are the same for all lanes,
 		 * so comparing first lane is sufficient*/
@@ -1160,6 +1510,7 @@ static enum link_training_result perform_clock_recovery_sequence(
 			retries_cr++;
 		else
 			retries_cr = 0;
+#endif
 
 		/* 8. update VS/PE/PC2 in lt_settings*/
 		dp_update_drive_settings(lt_settings, req_settings);
@@ -1194,7 +1545,11 @@ static inline enum link_training_result dp_transition_to_video_idle(
 	 * TPS4 must be used instead of POST_LT_ADJ_REQ.
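
/*
 * Sketch of the retry rule in perform_clock_recovery_sequence() above
 * (standalone form is ours): a clock-recovery retry is only counted
 * when the sink requests the drive settings it already has — FFE
 * preset level for 128b/132b, voltage swing for 8b/10b — with lane 0
 * standing in for all lanes. 8b/10b additionally aborts once maximum
 * voltage swing is reached; 128b/132b has no such cap.
 */
static int update_cr_retries(int retries_cr,
			     unsigned int cur_lane0_level,
			     unsigned int req_lane0_level)
{
	/* levels are FFE presets (128b/132b) or voltage swings (8b/10b) */
	return cur_lane0_level == req_lane0_level ? retries_cr + 1 : 0;
}
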
*/ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || +#if defined(CONFIG_DRM_AMD_DC_DCN) + lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { +#else lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) { +#endif /* delay 5ms after Main Link output idle pattern and then check * DPCD 0202h. */ @@ -1290,6 +1645,32 @@ static inline void decide_8b_10b_training_settings( lt_settings->should_set_fec_ready = true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static inline void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, 0, sizeof(*lt_settings)); + + lt_settings->link_settings = *link_settings; + /* TODO: should decide link spread when populating link_settings */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : + LINK_SPREAD_05_DOWNSPREAD_30KHZ; + + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); + lt_settings->eq_pattern_time = 2500; + lt_settings->eq_wait_time_limit = 400000; + lt_settings->eq_loop_count_limit = 20; + lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; + lt_settings->cds_pattern_time = 2500; + lt_settings->cds_wait_time_limit = (dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; + lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ? + LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT; +} +#endif + void dp_decide_training_settings( struct dc_link *link, const struct dc_link_settings *link_settings, @@ -1297,6 +1678,10 @@ void dp_decide_training_settings( { if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) decide_8b_10b_training_settings(link, link_settings, lt_settings); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) + decide_128b_132b_training_settings(link, link_settings, lt_settings); +#endif } static void override_training_settings( @@ -1325,6 +1710,11 @@ static void override_training_settings( lt_settings->pre_emphasis = overrides->pre_emphasis; if (overrides->post_cursor2 != NULL) lt_settings->post_cursor2 = overrides->post_cursor2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (overrides->ffe_preset != NULL) + lt_settings->ffe_preset = overrides->ffe_preset; +#endif + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lt_settings->lane_settings[lane].VOLTAGE_SWING = lt_settings->voltage_swing != NULL ? 
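
/*
 * Sketch: the CDS wait budget chosen by
 * decide_128b_132b_training_settings() above scales with hop count.
 * DPCD PHY_REPEATER_CNT is one-hot (0x80 = 1 repeater, 0x40 = 2, ...
 * 0x01 = 8) and each hop adds 20 ms. This decode is simplified — the
 * driver's dp_convert_to_count() returns 0 for non-one-hot values.
 */
#include <stdint.h>

static unsigned int repeater_count(uint8_t phy_repeater_cnt)
{
	unsigned int n = 0;
	uint8_t v = phy_repeater_cnt;

	while (v && !(v & 0x80)) {
		v <<= 1;
		n++;
	}
	return v ? n + 1 : 0;
}

static uint32_t cds_wait_time_limit_us(uint8_t phy_repeater_cnt)
{
	return (repeater_count(phy_repeater_cnt) + 1) * 20000u;
}
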
@@ -1384,7 +1774,7 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count) return 0; // invalid value } -enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) +static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) { uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; @@ -1395,7 +1785,7 @@ enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) sizeof(repeater_mode)); } -enum dc_status configure_lttpr_mode_non_transparent( +static enum dc_status configure_lttpr_mode_non_transparent( struct dc_link *link, const struct link_training_settings *lt_settings) { @@ -1511,6 +1901,17 @@ static void print_status_message( case LINK_RATE_HIGH3: link_rate = "HBR3"; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_UHBR10: + link_rate = "UHBR10"; + break; + case LINK_RATE_UHBR13_5: + link_rate = "UHBR13.5"; + break; + case LINK_RATE_UHBR20: + link_rate = "UHBR20"; + break; +#endif default: break; } @@ -1540,6 +1941,20 @@ static void print_status_message( case LINK_TRAINING_LINK_LOSS: lt_result = "Link loss"; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_LT_FAILED: + lt_result = "LT_FAILED received"; + break; + case DP_128b_132b_MAX_LOOP_COUNT_REACHED: + lt_result = "max loop count reached"; + break; + case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: + lt_result = "channel EQ timeout"; + break; + case DP_128b_132b_CDS_DONE_TIMEOUT: + lt_result = "CDS timeout"; + break; +#endif default: break; } @@ -1559,6 +1974,9 @@ static void print_status_message( } /* Connectivity log: link training */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ +#endif CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", link_rate, lt_settings->link_settings.lane_count, @@ -1641,9 +2059,23 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train static void dpcd_exit_training_mode(struct dc_link *link) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t sink_status = 0; + uint8_t i; +#endif /* clear training pattern set */ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + udelay(1000); + } +#endif } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, @@ -1667,6 +2099,131 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link, return status; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, + uint32_t *interval_in_us) +{ + union dp_128b_132b_training_aux_rd_interval dpcd_interval; + uint32_t interval_unit = 0; + + dpcd_interval.raw = 0; + core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL, + &dpcd_interval.raw, sizeof(dpcd_interval.raw)); + interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ + /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * + * INTERVAL_UNIT. 
The maximum is 256 ms + */ + *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; +} + +static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + uint8_t loop_count = 0; + uint32_t aux_rd_interval = 0; + uint32_t wait_time = 0; + struct link_training_settings req_settings; + union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + enum link_training_result status = LINK_TRAINING_SUCCESS; + + /* Transmit 128b/132b_TPS1 over Main-Link and Set TRAINING_PATTERN_SET to 01h */ + dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX); + dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); + + /* Adjust TX_FFE_PRESET_VALUE as requested */ + dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, &req_settings, DPRX); + dp_update_drive_settings(lt_settings, req_settings); + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + dp_set_hw_lane_settings(link, lt_settings, DPRX); + dpcd_set_lane_settings(link, lt_settings, DPRX); + + /* Transmit 128b/132b_TPS2 over Main-Link and Set TRAINING_PATTERN_SET to 02h */ + dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX); + dpcd_set_training_pattern(link, lt_settings->pattern_for_eq); + + /* poll for channel EQ done */ + while (status == LINK_TRAINING_SUCCESS) { + loop_count++; + dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); + wait_time += aux_rd_interval; + dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, &req_settings, DPRX); + dp_update_drive_settings(lt_settings, req_settings); + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, + dpcd_lane_status)) { + /* pass */ + break; + } else if (loop_count >= lt_settings->eq_loop_count_limit) { + status = DP_128b_132b_MAX_LOOP_COUNT_REACHED; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + status = DP_128b_132b_LT_FAILED; + } else { + dp_set_hw_lane_settings(link, lt_settings, DPRX); + dpcd_set_lane_settings(link, lt_settings, DPRX); + } + } + + /* poll for EQ interlane align done */ + while (status == LINK_TRAINING_SUCCESS) { + if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (wait_time >= lt_settings->eq_wait_time_limit) { + status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + status = DP_128b_132b_LT_FAILED; + } else { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->eq_pattern_time); + wait_time += lt_settings->eq_pattern_time; + dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, &req_settings, DPRX); + } + } + + return status; +} + +static enum link_training_result dp_perform_128b_132b_cds_done_sequence( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + /* Assumption: assume hardware has transmitted eq pattern */ + enum link_training_result status = LINK_TRAINING_SUCCESS; + struct link_training_settings req_settings; + union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + uint32_t wait_time = 0; + + /* initiate CDS done sequence */ + dpcd_set_training_pattern(link, 
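
/*
 * Sketch of the decode used by the EQ polling loop above:
 * DP_128b_132b_TRAINING_AUX_RD_INTERVAL packs a 7-bit VALUE with a
 * UNIT bit (1b = 1 ms, 0b = 2 ms), and the wait is (VALUE + 1) * UNIT,
 * capping at 128 * 2 ms = 256 ms. Treating bit 7 as UNIT matches the
 * union used above but is our reading of the field layout.
 */
#include <stdint.h>

static uint32_t aux_rd_interval_128b_132b_us(uint8_t dpcd_raw)
{
	uint32_t value = dpcd_raw & 0x7f;		/* bits 6:0 */
	uint32_t unit_ms = (dpcd_raw & 0x80) ? 1 : 2;	/* bit 7 */

	return (value + 1) * unit_ms * 1000;
}
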
 			lt_settings->pattern_for_cds);
+
+	/* poll for CDS interlane align done and symbol lock */
+	while (status == LINK_TRAINING_SUCCESS) {
+		dp_wait_for_training_aux_rd_interval(link,
+				lt_settings->cds_pattern_time);
+		wait_time += lt_settings->cds_pattern_time;
+		dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
+				&dpcd_lane_status_updated, &req_settings, DPRX);
+		if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+				dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
+			/* pass */
+			break;
+		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+			status = DP_128b_132b_LT_FAILED;
+		} else if (wait_time >= lt_settings->cds_wait_time_limit) {
+			status = DP_128b_132b_CDS_DONE_TIMEOUT;
+		}
+	}
+
+	return status;
+}
+#endif
+
 static enum link_training_result dp_perform_8b_10b_link_training(
 	struct dc_link *link,
 	struct link_training_settings *lt_settings)
@@ -1723,6 +2280,35 @@ static enum link_training_result dp_perform_8b_10b_link_training(
 	return status;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum link_training_result dp_perform_128b_132b_link_training(
+		struct dc_link *link,
+		struct link_training_settings *lt_settings)
+{
+	enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+	/* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
+	if (link->dc->debug.legacy_dp2_lt) {
+		struct link_training_settings legacy_settings;
+
+		decide_8b_10b_training_settings(link,
+				&lt_settings->link_settings,
+				&legacy_settings);
+		return dp_perform_8b_10b_link_training(link, &legacy_settings);
+	}
+
+	dpcd_set_link_settings(link, lt_settings);
+
+	if (result == LINK_TRAINING_SUCCESS)
+		result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
+
+	if (result == LINK_TRAINING_SUCCESS)
+		result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
+
+	return result;
+}
+#endif
+
 enum link_training_result dc_link_dp_perform_link_training(
 	struct dc_link *link,
 	const struct dc_link_settings *link_settings,
@@ -1757,6 +2343,10 @@ enum link_training_result dc_link_dp_perform_link_training(
 	 */
 	if (encoding == DP_8b_10b_ENCODING)
 		status = dp_perform_8b_10b_link_training(link, &lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	else if (encoding == DP_128b_132b_ENCODING)
+		status = dp_perform_128b_132b_link_training(link, &lt_settings);
+#endif
 	else
 		ASSERT(0);
@@ -1794,16 +2384,19 @@ bool perform_link_training_with_retries(
 	/* Dynamically assigned link encoders associated with stream rather than
 	 * link.
 	 */
-	if (link->dc->res_pool->funcs->link_encs_assign)
-		link_enc = stream->link_enc;
+	if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign)
+		link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
 	else
 		link_enc = link->link_enc;
 
 	/* We need to do this before the link training to ensure the idle pattern in SST
 	 * mode will be sent right after the link training
 	 */
-	link_enc->funcs->connect_dig_be_to_fe(link_enc,
+	if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING) {
+		link_enc->funcs->connect_dig_be_to_fe(link_enc,
 							pipe_ctx->stream_res.stream_enc->id, true);
+		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+	}
 
 	for (j = 0; j < attempts; ++j) {
@@ -1869,12 +2462,16 @@ bool perform_link_training_with_retries(
 			if (type == dc_connection_none)
 				break;
 		} else if (do_fallback) {
+			uint32_t req_bw;
+			uint32_t link_bw;
+
 			decide_fallback_link_setting(*link_setting, &current_setting, status);
 			/* Fail link training if reduced link bandwidth no longer meets
 			 * stream requirements.
 			 */
-			if (dc_bandwidth_in_kbps_from_timing(&stream->timing) <
-					dc_link_bandwidth_kbps(link, &current_setting))
+			req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+			link_bw = dc_link_bandwidth_kbps(link, &current_setting);
+			if (req_bw > link_bw)
 				break;
 		}
@@ -1976,8 +2573,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 					dp_cs_id, link_settings);
 
 	/* Set FEC enable */
-	fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
-	dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+#endif
+		fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
+		dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	}
+#endif
 
 	if (lt_overrides->alternate_scrambler_reset) {
 		if (*lt_overrides->alternate_scrambler_reset)
@@ -2019,23 +2622,59 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
 	 * Still shouldn't turn off dp_receiver (DPCD:600h)
 	 */
 	if (link_down == true) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
 		dp_disable_link_phy(link, link->connector_signal);
-		dp_set_fec_ready(link, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+#endif
+			dp_set_fec_ready(link, false);
 	}
 
 	link->sync_lt_in_progress = false;
 	return true;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+	enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+	if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+		lttpr_max_link_rate = LINK_RATE_UHBR20;
+	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+		lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+		lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+	return lttpr_max_link_rate;
+}
+#endif
+
 bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
 {
+	struct link_encoder *link_enc = NULL;
+
 	if (!max_link_enc_cap) {
 		DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
 		return false;
 	}
 
-	if (link->link_enc->funcs->get_max_link_cap) {
-		link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap);
+	/* Links supporting dynamically assigned link encoder will be assigned
next + * available encoder if one not already assigned. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); + } else + link_enc = link->link_enc; + ASSERT(link_enc); + + if (link_enc && link_enc->funcs->get_max_link_cap) { + link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); return true; } @@ -2048,9 +2687,31 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_ static struct dc_link_settings get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; +#if defined(CONFIG_DRM_AMD_DC_DCN) + enum dc_link_rate lttpr_max_link_rate; +#endif + struct link_encoder *link_enc = NULL; + + /* Links supporting dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); + } else + link_enc = link->link_enc; + ASSERT(link_enc); /* get max link encoder capability */ - link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + if (link_enc) + link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (max_link_cap.link_rate >= LINK_RATE_UHBR10 && + !link->hpo_dp_link_enc) + max_link_cap.link_rate = LINK_RATE_HIGH3; +#endif /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) @@ -2071,8 +2732,15 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; +#if defined(CONFIG_DRM_AMD_DC_DCN) + lttpr_max_link_rate = get_lttpr_max_link_rate(link); + + if (lttpr_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = lttpr_max_link_rate; +#else if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; +#endif DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", __func__, @@ -2082,7 +2750,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) return max_link_cap; } -enum dc_status read_hpd_rx_irq_data( +static enum dc_status read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) { @@ -2213,7 +2881,13 @@ bool dp_verify_link_cap( enum link_training_result status; union hpd_irq_data irq_data; - if (link->dc->debug.skip_detection_link_training) { + /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. 
*/ + if (link->dc->debug.skip_detection_link_training || + link->is_dig_mapping_flexible) { + link->verified_link_cap = *known_limit_link_setting; + return true; + } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign && + !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine)) { link->verified_link_cap = *known_limit_link_setting; return true; } @@ -2231,6 +2905,10 @@ bool dp_verify_link_cap( core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) + reset_dp_hpo_stream_encoders_for_link(link); +#endif /* TODO implement override and monitor patch later */ /* try to train the link from high to low to @@ -2253,7 +2931,7 @@ bool dp_verify_link_cap( * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, * so add extra cycle of enabling and disabling the PHY before first link training. */ - if (link->link_enc->features.flags.bits.DP_IS_USB_C && + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && link->dc->debug.usbc_combo_phy_reset_wa) { dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); dp_disable_link_phy(link, link->connector_signal); @@ -2386,7 +3064,17 @@ static struct dc_link_settings get_common_supported_link_settings( * We map it to the maximum supported link rate that * is smaller than MAX_LINK_BW in this case. */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link_settings.link_rate > LINK_RATE_UHBR20) { + link_settings.link_rate = LINK_RATE_UHBR20; + } else if (link_settings.link_rate < LINK_RATE_UHBR20 && + link_settings.link_rate > LINK_RATE_UHBR13_5) { + link_settings.link_rate = LINK_RATE_UHBR13_5; + } else if (link_settings.link_rate < LINK_RATE_UHBR10 && + link_settings.link_rate > LINK_RATE_HIGH3) { +#else if (link_settings.link_rate > LINK_RATE_HIGH3) { +#endif link_settings.link_rate = LINK_RATE_HIGH3; } else if (link_settings.link_rate < LINK_RATE_HIGH3 && link_settings.link_rate > LINK_RATE_HIGH2) { @@ -2431,6 +3119,14 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) { switch (link_rate) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_UHBR20: + return LINK_RATE_UHBR13_5; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + return LINK_RATE_HIGH3; +#endif case LINK_RATE_HIGH3: return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: @@ -2465,11 +3161,55 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate) return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: return LINK_RATE_HIGH3; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_HIGH3: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + return LINK_RATE_UHBR13_5; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR20; +#endif default: return LINK_RATE_UNKNOWN; } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static bool decide_fallback_link_setting_max_bw_policy( + const struct dc_link_settings *max, + struct dc_link_settings *cur) +{ + uint8_t cur_idx = 0, next_idx; + bool found = false; + + while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find current index */ + if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && + dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) + break; + else + cur_idx++; + + next_idx = cur_idx + 1; + + while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find next index */ + if 
(dp_lt_fallbacks[next_idx].lane_count <= max->lane_count && + dp_lt_fallbacks[next_idx].link_rate <= max->link_rate) + break; + else + next_idx++; + + if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { + cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; + cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; + found = true; + } + + return found; +} +#endif + /* * function: set link rate and lane count fallback based * on current link setting and last link training result @@ -2485,6 +3225,11 @@ static bool decide_fallback_link_setting( { if (!current_link_setting) return false; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING) + return decide_fallback_link_setting_max_bw_policy(&initial_link_settings, + current_link_setting); +#endif switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -2750,7 +3495,7 @@ void decide_link_settings(struct dc_stream_state *stream, } /*************************Short Pulse IRQ***************************/ -static bool allow_hpd_rx_irq(const struct dc_link *link) +bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) { /* * Don't handle RX IRQ unless one of following is met: @@ -2857,9 +3602,15 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) union phy_test_pattern dpcd_test_pattern; union lane_adjust dpcd_lane_adjustment[2]; unsigned char dpcd_post_cursor_2_adjustment = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + unsigned char test_pattern_buffer[ + (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - + DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; +#else unsigned char test_pattern_buffer[ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; +#endif unsigned int test_pattern_size = 0; enum dp_test_pattern test_pattern; struct dc_link_training_settings link_settings; @@ -2925,6 +3676,35 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) case PHY_TEST_PATTERN_CP2520_3: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case PHY_TEST_PATTERN_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; + break; + case PHY_TEST_PATTERN_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; + break; + case PHY_TEST_PATTERN_PRBS9: + test_pattern = DP_TEST_PATTERN_PRBS9; + break; + case PHY_TEST_PATTERN_PRBS11: + test_pattern = DP_TEST_PATTERN_PRBS11; + break; + case PHY_TEST_PATTERN_PRBS15: + test_pattern = DP_TEST_PATTERN_PRBS15; + break; + case PHY_TEST_PATTERN_PRBS23: + test_pattern = DP_TEST_PATTERN_PRBS23; + break; + case PHY_TEST_PATTERN_PRBS31: + test_pattern = DP_TEST_PATTERN_PRBS31; + break; + case PHY_TEST_PATTERN_264BIT_CUSTOM: + test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; + break; + case PHY_TEST_PATTERN_SQUARE_PULSE: + test_pattern = DP_TEST_PATTERN_SQUARE_PULSE; + break; +#endif default: test_pattern = DP_TEST_PATTERN_VIDEO_MODE; break; @@ -2940,6 +3720,27 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_size); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) { + test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) + core_link_read_dpcd( + link, + DP_PHY_SQUARE_PATTERN, + test_pattern_buffer, + test_pattern_size); + } + + if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { + test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- + DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; + core_link_read_dpcd( + link, + DP_TEST_264BIT_CUSTOM_PATTERN_7_0, + test_pattern_buffer, + 
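
/*
 * Worked example (ours): with the max-bandwidth fallback policy above,
 * a failed 128b/132b training walks dp_lt_fallbacks[] in strictly
 * decreasing bandwidth order, skipping entries that exceed the
 * verified maximum lane count or rate. From 4 x UHBR20 the sequence is
 *
 *   4 x UHBR20 -> 4 x UHBR13.5 -> 2 x UHBR20 -> 4 x UHBR10
 *              -> 2 x UHBR13.5 -> 4 x HBR3 -> 1 x UHBR20 -> ...
 *
 * whereas a 2-lane-limited connector goes 2 x UHBR20 -> 2 x UHBR13.5
 * -> 1 x UHBR20 -> 2 x UHBR10 -> ... per the table at the top of this
 * file.
 */
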
test_pattern_size); + } +#endif + /* prepare link training settings */ link_settings.link = link->cur_link_settings; @@ -2948,6 +3749,24 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) lane++) { dpcd_lane_adjust.raw = get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == + DP_128b_132b_ENCODING) { + link_settings.lane_settings[lane].FFE_PRESET.raw = + dpcd_lane_adjust.tx_ffe.PRESET_VALUE; + } else if (dp_get_link_encoding_format(&link->cur_link_settings) == + DP_8b_10b_ENCODING) { + link_settings.lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing) + (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); + link_settings.lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis) + (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); + link_settings.lane_settings[lane].POST_CURSOR2 = + (enum dc_post_cursor2) + ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); + } +#else link_settings.lane_settings[lane].VOLTAGE_SWING = (enum dc_voltage_swing) (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); @@ -2957,6 +3776,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) link_settings.lane_settings[lane].POST_CURSOR2 = (enum dc_post_cursor2) ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); +#endif } for (i = 0; i < 4; i++) @@ -3184,7 +4004,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video } } -static void handle_automated_test(struct dc_link *link) +void dc_link_dp_handle_automated_test(struct dc_link *link) { union test_request test_request; union test_response test_response; @@ -3233,17 +4053,50 @@ static void handle_automated_test(struct dc_link *link) sizeof(test_response)); } -bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss) +void dc_link_dp_handle_link_loss(struct dc_link *link) +{ + int i; + struct pipe_ctx *pipe_ctx; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + break; + } + + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { + core_link_disable_stream(pipe_ctx); + } + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { + core_link_enable_stream(link->dc->current_state, pipe_ctx); + } + } +} + +bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work) { union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } }; union device_service_irq device_service_clear = { { 0 } }; enum dc_status result; bool status = false; - struct pipe_ctx *pipe_ctx; - int i; if (out_link_loss) *out_link_loss = false; + + if (has_left_work) + *has_left_work = false; /* For use cases related to down stream connection status change, * PSR and device auto test, refer to function handle_sst_hpd_irq * in DAL2.1*/ @@ -3275,11 +4128,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd &device_service_clear.raw, 
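
/*
 * Sketch (ours): the per-lane ADJUST_REQUEST bytes parsed above pack
 * two lanes per DPCD byte — lane 0 in the low nibble of the first
 * byte, lane 1 in its high nibble, lanes 2/3 likewise in the second
 * byte. This is the lookup get_nibble_at_index() performs:
 */
#include <stdint.h>

static uint8_t lane_adjust_nibble(const uint8_t *adjust_request, unsigned int lane)
{
	return (adjust_request[lane / 2] >> (4 * (lane & 1))) & 0x0f;
}
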
sizeof(device_service_clear.raw)); device_service_clear.raw = 0; - handle_automated_test(link); + if (defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_automated_test(link); return false; } - if (!allow_hpd_rx_irq(link)) { + if (!dc_link_dp_allow_hpd_rx_irq(link)) { DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", __func__, link->link_index); return false; @@ -3293,12 +4149,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd * so do not handle as a normal sink status change interrupt. */ - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; return true; + } /* check if we have MST msg and return since we poll for it */ - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; return false; + } /* For now we only handle 'Downstream port status' case. * If we got sink count changed it means @@ -3315,29 +4177,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd sizeof(hpd_irq_dpcd_data), "Status: "); - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) - break; - } - - if (pipe_ctx == NULL || pipe_ctx->stream == NULL) - return false; - - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) - core_link_disable_stream(pipe_ctx); - } - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) - core_link_enable_stream(link->dc->current_state, pipe_ctx); - } + if (defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_link_loss(link); status = false; if (out_link_loss) @@ -3561,6 +4404,43 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + union dp_dfp_cap_ext dfp_cap_ext; + memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); + core_link_read_dpcd( + link, + DP_DFP_CAPABILITY_EXTENSION_SUPPORT, + dfp_cap_ext.raw, + sizeof(dfp_cap_ext.raw)); + link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = + dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + + (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = + dfp_cap_ext.fields.max_video_h_active_width[0] + + (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = + dfp_cap_ext.fields.max_video_v_active_height[0] + + (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = + dfp_cap_ext.fields.encoding_format_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = + 
dfp_cap_ext.fields.rgb_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = + dfp_cap_ext.fields.ycbcr444_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = + dfp_cap_ext.fields.ycbcr422_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = + dfp_cap_ext.fields.ycbcr420_color_depth_caps; + DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); + DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false"); + DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); + DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); + DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); + } +#endif } static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, @@ -3620,7 +4500,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) bool dp_retrieve_lttpr_cap(struct dc_link *link) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t lttpr_dpcd_data[8]; + bool allow_lttpr_non_transparent_mode = 0; +#else uint8_t lttpr_dpcd_data[6]; +#endif bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable; bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; enum dc_status status = DC_ERROR_UNEXPECTED; @@ -3628,6 +4513,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 && + link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) { + allow_lttpr_non_transparent_mode = 1; + } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + allow_lttpr_non_transparent_mode = 1; + } +#endif + /* * Logic to determine LTTPR mode */ @@ -3635,13 +4530,21 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) if (vbios_lttpr_enable && vbios_lttpr_interop) link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else if (!vbios_lttpr_enable && vbios_lttpr_interop) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (allow_lttpr_non_transparent_mode) +#else if (link->dc->config.allow_lttpr_non_transparent_mode) +#endif link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else link->lttpr_mode = LTTPR_MODE_TRANSPARENT; } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support) +#else if (!link->dc->config.allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support) +#endif link->lttpr_mode = LTTPR_MODE_NON_LTTPR; else link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; @@ -3685,6 +4588,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; +#if defined(CONFIG_DRM_AMD_DC_DCN) + link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = + lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = + lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; +#endif + /* Attempt to train in 
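
/*
 * Sketch (ours): the multi-byte DFP capability fields assembled above
 * (max pixel rate, max H/V active) are little-endian byte pairs in the
 * raw DPCD buffer, hence the manual "b[0] + (b[1] << 8)" combining:
 */
#include <stdint.h>

static uint16_t dpcd_le16(const uint8_t b[2])
{
	return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
}
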
LTTPR transparent mode if repeater count exceeds 8. */ is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && link->dpcd_caps.lttpr_caps.max_lane_count > 0 && @@ -3935,16 +4848,82 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + status = core_link_read_dpcd( + link, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); + DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); + } +#else status = core_link_read_dpcd( link, DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); +#endif } if (!dpcd_read_sink_ext_caps(link)) link->dpcd_sink_ext_caps.raw = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV]; + + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); + + core_link_read_dpcd(link, + DP_128b_132b_SUPPORTED_LINK_RATES, + &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, + sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); + if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) + link->reported_link_cap.link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) + link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) + link->reported_link_cap.link_rate = LINK_RATE_UHBR10; + else + dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); + DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); + DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", + link->reported_link_cap.link_rate / 100, + link->reported_link_cap.link_rate % 100); + + core_link_read_dpcd(link, + DP_SINK_VIDEO_FALLBACK_FORMATS, + &link->dpcd_caps.fallback_formats.raw, + sizeof(link->dpcd_caps.fallback_formats.raw)); + DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); + if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) + DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) + DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) + DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.raw == 0) { + DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); + link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; + } + + 
core_link_read_dpcd(link, + DP_FEC_CAPABILITY_1, + &link->dpcd_caps.fec_cap1.raw, + sizeof(link->dpcd_caps.fec_cap1.raw)); + DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); + if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) + DC_LOG_DP2("\tFEC aggregated error counters are supported"); + } +#endif + /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -4375,7 +5354,7 @@ bool dc_link_dp_set_test_pattern( * MuteAudioEndpoint(pPathMode->pDisplayPath, true); */ /* Blank stream */ - pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc); + pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); } dp_set_hw_test_pattern(link, test_pattern, @@ -4415,6 +5394,35 @@ bool dc_link_dp_set_test_pattern( case DP_TEST_PATTERN_CP2520_3: pattern = PHY_TEST_PATTERN_CP2520_3; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_TEST_PATTERN_128b_132b_TPS1: + pattern = PHY_TEST_PATTERN_128b_132b_TPS1; + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + pattern = PHY_TEST_PATTERN_128b_132b_TPS2; + break; + case DP_TEST_PATTERN_PRBS9: + pattern = PHY_TEST_PATTERN_PRBS9; + break; + case DP_TEST_PATTERN_PRBS11: + pattern = PHY_TEST_PATTERN_PRBS11; + break; + case DP_TEST_PATTERN_PRBS15: + pattern = PHY_TEST_PATTERN_PRBS15; + break; + case DP_TEST_PATTERN_PRBS23: + pattern = PHY_TEST_PATTERN_PRBS23; + break; + case DP_TEST_PATTERN_PRBS31: + pattern = PHY_TEST_PATTERN_PRBS31; + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; + break; + case DP_TEST_PATTERN_SQUARE_PULSE: + pattern = PHY_TEST_PATTERN_SQUARE_PULSE; + break; +#endif default: return false; } @@ -4677,7 +5685,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -4697,7 +5705,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) link_enc->funcs->fec_set_ready(link_enc, true); link->fec_state = dc_link_fec_ready; } else { - link_enc->funcs->fec_set_ready(link->link_enc, false); + link_enc->funcs->fec_set_ready(link_enc, false); link->fec_state = dc_link_fec_not_ready; dm_error("dpcd write failed to set fec_ready"); } @@ -4724,8 +5732,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link( - link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -4990,6 +5997,208 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings if ((link_settings->link_rate >= LINK_RATE_LOW) && (link_settings->link_rate <= LINK_RATE_HIGH3)) return DP_8b_10b_ENCODING; +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && + (link_settings->link_rate <= LINK_RATE_UHBR20)) + return DP_128b_132b_ENCODING; +#endif return DP_UNKNOWN_ENCODING; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) +static void get_lane_status( + struct dc_link *link, + uint32_t lane_count, + union lane_status *status, + union 
lane_align_status_updated *status_updated) +{ + unsigned int lane; + uint8_t dpcd_buf[3] = {0}; + + if (status == NULL || status_updated == NULL) { + return; + } + + core_link_read_dpcd( + link, + DP_LANE0_1_STATUS, + dpcd_buf, + sizeof(dpcd_buf)); + + for (lane = 0; lane < lane_count; lane++) { + status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); + } + + status_updated->raw = dpcd_buf[2]; +} + +bool dpcd_write_128b_132b_sst_payload_allocation_table( + const struct dc_stream_state *stream, + struct dc_link *link, + struct link_mst_stream_allocation_table *proposed_table, + bool allocate) +{ + const uint8_t vc_id = 1; /// VC ID always 1 for SST + const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST + bool result = false; + uint8_t req_slot_count = 0; + struct fixed31_32 avg_time_slots_per_mtp = { 0 }; + union payload_table_update_status update_status = { 0 }; + const uint32_t max_retries = 30; + uint32_t retries = 0; + + if (allocate) { + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + } else { + /// Leave req_slot_count = 0 if allocate is false. + } + + /// Write DPCD 2C0 = 1 to start updating + update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; + core_link_write_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + /// Program the changes in DPCD 1C0 - 1C2 + ASSERT(vc_id == 1); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_SET, + &vc_id, + 1); + + ASSERT(start_time_slot == 0); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, + &start_time_slot, + 1); + + ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, + &req_slot_count, + 1); + + /// Poll till DPCD 2C0 read 1 + /// Try for at least 150ms (30 retries, with 5ms delay after each attempt) + + while (retries < max_retries) { + if (core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1) == DC_OK) { + if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { + DC_LOG_DP2("SST Update Payload: downstream payload table updated."); + result = true; + break; + } + } else { + union dpcd_rev dpcdRev; + + if (core_link_read_dpcd( + link, + DP_DPCD_REV, + &dpcdRev.raw, + 1) != DC_OK) { + DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " + "of sink while polling payload table " + "updated status bit."); + break; + } + } + retries++; + udelay(5000); + } + + if (!result && retries == max_retries) { + DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " + "continue on. 
Something is wrong with the branch."); + // TODO - DP2.0 Payload: Read and log the payload table from downstream branch + } + + proposed_table->stream_count = 1; /// Always 1 stream for SST + proposed_table->stream_allocations[0].slot_count = req_slot_count; + proposed_table->stream_allocations[0].vcp_id = vc_id; + + return result; +} + +bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link) +{ + /* + * wait for ACT handled + */ + int i; + const int act_retries = 30; + enum act_return_status result = ACT_FAILED; + union payload_table_update_status update_status = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated lane_status_updated; + + for (i = 0; i < act_retries; i++) { + get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); + + if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(lane_status_updated)) { + DC_LOG_ERROR("SST Update Payload: Link loss occurred while " + "polling for ACT handled."); + result = ACT_LINK_LOST; + break; + } + core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + if (update_status.bits.ACT_HANDLED == 1) { + DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); + result = ACT_SUCCESS; + break; + } + + udelay(5000); + } + + if (result == ACT_FAILED) { + DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " + "continue on. Something is wrong with the branch."); + } + + return (result == ACT_SUCCESS); +} + +struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link) +{ + struct fixed31_32 link_bw_effective = + dc_fixpt_from_int( + dc_link_bandwidth_kbps(link, &link->cur_link_settings)); + struct fixed31_32 timeslot_bw_effective = + dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); + struct fixed31_32 timing_bw = + dc_fixpt_from_int( + dc_bandwidth_in_kbps_from_timing(&stream->timing)); + struct fixed31_32 avg_time_slots_per_mtp = + dc_fixpt_div(timing_bw, timeslot_bw_effective); + + return avg_time_slots_per_mtp; +} + +bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->stream->link->hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c index 72970e49800a..7f25c11f4248 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c @@ -176,12 +176,15 @@ static void dpcd_reduce_address_range( uint8_t * const reduced_data, const uint32_t reduced_size) { - const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size); - const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size); const uint32_t offset = reduced_address - extended_address; - if (extended_end_address == reduced_end_address && extended_address == reduced_address) - return; /* extended and reduced address ranges point to the same data */ + /* + * If the address is same, address was not extended. + * So we do not need to free any memory. + * The data is in original buffer(reduced_data). 
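+ * Otherwise extended_data is a separately allocated bounce buffer and must
+ * be freed here.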
+ */ + if (extended_data == reduced_data) + return; memcpy(&extended_data[offset], reduced_data, reduced_size); kfree(extended_data); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index de80a9ea4cfa..4dce25c39b75 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -35,78 +35,128 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream) int i; /* Loop over created link encoder objects. */ - for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { - link_enc = stream->ctx->dc->res_pool->link_encoders[i]; - - if (link_enc && - ((uint32_t)stream->signal & link_enc->output_signals)) { - if (dc_is_dp_signal(stream->signal)) { - /* DIGs do not support DP2.0 streams with 128b/132b encoding. */ - struct dc_link_settings link_settings = {0}; - - decide_link_settings(stream, &link_settings); - if ((link_settings.link_rate >= LINK_RATE_LOW) && - link_settings.link_rate <= LINK_RATE_HIGH3) { + if (stream) { + for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { + link_enc = stream->ctx->dc->res_pool->link_encoders[i]; + + /* Need to check link signal type rather than stream signal type which may not + * yet match. + */ + if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) { + if (dc_is_dp_signal(stream->signal)) { + /* DIGs do not support DP2.0 streams with 128b/132b encoding. */ + struct dc_link_settings link_settings = {0}; + + decide_link_settings(stream, &link_settings); + if ((link_settings.link_rate >= LINK_RATE_LOW) && + link_settings.link_rate <= LINK_RATE_HIGH3) { + is_dig_stream = true; + break; + } + } else { is_dig_stream = true; break; } - } else { - is_dig_stream = true; - break; } } } - return is_dig_stream; } -/* Update DIG link encoder resource tracking variables in dc_state. */ -static void update_link_enc_assignment( +static struct link_enc_assignment get_assignment(struct dc *dc, int i) +{ + struct link_enc_assignment assignment; + + if (dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_TRANSIENT) + assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i]; + else /* LINK_ENC_CFG_STEADY */ + assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + return assignment; +} + +/* Return stream using DIG link encoder resource. NULL if unused. */ +static struct dc_stream_state *get_stream_using_link_enc( + struct dc_state *state, + enum engine_id eng_id) +{ + struct dc_stream_state *stream = NULL; + int i; + + for (i = 0; i < state->stream_count; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if ((assignment.valid == true) && (assignment.eng_id == eng_id)) { + stream = state->streams[i]; + break; + } + } + + return stream; +} + +static void remove_link_enc_assignment( struct dc_state *state, struct dc_stream_state *stream, - enum engine_id eng_id, - bool add_enc) + enum engine_id eng_id) { int eng_idx; - int stream_idx; int i; if (eng_id != ENGINE_ID_UNKNOWN) { eng_idx = eng_id - ENGINE_ID_DIGA; - stream_idx = -1; - /* Index of stream in dc_state used to update correct entry in + /* stream ptr of stream in dc_state used to update correct entry in * link_enc_assignments table. 
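 * Entries are matched by stream pointer rather than by stream index.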
*/ - for (i = 0; i < state->stream_count; i++) { - if (stream == state->streams[i]) { - stream_idx = i; + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid && assignment.stream == stream) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; + /* Only add link encoder back to availability pool if not being + * used by any other stream (i.e. removing SST stream or last MST stream). + */ + if (get_stream_using_link_enc(state, eng_id) == NULL) + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id; + stream->link_enc = NULL; break; } } + } +} + +static void add_link_enc_assignment( + struct dc_state *state, + struct dc_stream_state *stream, + enum engine_id eng_id) +{ + int eng_idx; + int i; - /* Update link encoder assignments table, link encoder availability - * pool and link encoder assigned to stream in state. - * Add/remove encoder resource to/from stream. + if (eng_id != ENGINE_ID_UNKNOWN) { + eng_idx = eng_id - ENGINE_ID_DIGA; + + /* stream ptr of stream in dc_state used to update correct entry in + * link_enc_assignments table. */ - if (stream_idx != -1) { - if (add_enc) { - state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){ + for (i = 0; i < state->stream_count; i++) { + if (stream == state->streams[i]) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i] = (struct link_enc_assignment){ .valid = true, .ep_id = (struct display_endpoint_id) { .link_id = stream->link->link_id, .ep_type = stream->link->ep_type}, - .eng_id = eng_id}; - state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN; + .eng_id = eng_id, + .stream = stream}; + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN; stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx]; - } else { - state->res_ctx.link_enc_assignments[stream_idx].valid = false; - state->res_ctx.link_enc_avail[eng_idx] = eng_id; - stream->link_enc = NULL; + break; } - } else { - dm_output_to_console("%s: Stream not found in dc_state.\n", __func__); } + + /* Attempted to add an encoder assignment for a stream not in dc_state. */ + ASSERT(i != state->stream_count); } } @@ -119,7 +169,7 @@ static enum engine_id find_first_avail_link_enc( int i; for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { - eng_id = state->res_ctx.link_enc_avail[i]; + eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; if (eng_id != ENGINE_ID_UNKNOWN) break; } @@ -127,30 +177,51 @@ static enum engine_id find_first_avail_link_enc( return eng_id; } -/* Return stream using DIG link encoder resource. NULL if unused. */ -static struct dc_stream_state *get_stream_using_link_enc( +static bool is_avail_link_enc(struct dc_state *state, enum engine_id eng_id) +{ + bool is_avail = false; + int eng_idx = eng_id - ENGINE_ID_DIGA; + + if (eng_id != ENGINE_ID_UNKNOWN && state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] != ENGINE_ID_UNKNOWN) + is_avail = true; + + return is_avail; +} + +/* Test for display_endpoint_id equality. 
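+ * Endpoints are equal when link_id (id, enum_id and type) and ep_type all
+ * match.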
*/ +static bool are_ep_ids_equal(struct display_endpoint_id *lhs, struct display_endpoint_id *rhs) +{ + bool are_equal = false; + + if (lhs->link_id.id == rhs->link_id.id && + lhs->link_id.enum_id == rhs->link_id.enum_id && + lhs->link_id.type == rhs->link_id.type && + lhs->ep_type == rhs->ep_type) + are_equal = true; + + return are_equal; +} + +static struct link_encoder *get_link_enc_used_by_link( struct dc_state *state, - enum engine_id eng_id) + const struct dc_link *link) { - struct dc_stream_state *stream = NULL; - int stream_idx = -1; + struct link_encoder *link_enc = NULL; + struct display_endpoint_id ep_id; int i; + ep_id = (struct display_endpoint_id) { + .link_id = link->link_id, + .ep_type = link->ep_type}; + for (i = 0; i < state->stream_count; i++) { - struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; - if (assignment.valid && (assignment.eng_id == eng_id)) { - stream_idx = i; - break; - } + if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) + link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; } - if (stream_idx != -1) - stream = state->streams[stream_idx]; - else - dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id); - - return stream; + return link_enc; } void link_enc_cfg_init( @@ -161,10 +232,12 @@ void link_enc_cfg_init( for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { if (dc->res_pool->link_encoders[i]) - state->res_ctx.link_enc_avail[i] = (enum engine_id) i; + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = (enum engine_id) i; else - state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } + + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } void link_enc_cfg_link_encs_assign( @@ -175,11 +248,17 @@ void link_enc_cfg_link_encs_assign( { enum engine_id eng_id = ENGINE_ID_UNKNOWN; int i; + int j; + + ASSERT(state->stream_count == stream_count); /* Release DIG link encoder resources before running assignment algorithm. */ for (i = 0; i < stream_count; i++) dc->res_pool->funcs->link_enc_unassign(state, streams[i]); + for (i = 0; i < MAX_PIPES; i++) + ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false); + /* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */ for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; @@ -191,26 +270,82 @@ void link_enc_cfg_link_encs_assign( /* Physical endpoints have a fixed mapping to DIG link encoders. */ if (!stream->link->is_dig_mapping_flexible) { eng_id = stream->link->eng_id; - update_link_enc_assignment(state, stream, eng_id, true); + add_link_enc_assignment(state, stream, eng_id); + } + } + + /* (b) Retain previous assignments for mappable endpoints if encoders still available. */ + eng_id = ENGINE_ID_UNKNOWN; + + if (state != dc->current_state) { + struct dc_state *prev_state = dc->current_state; + + for (i = 0; i < stream_count; i++) { + struct dc_stream_state *stream = state->streams[i]; + + /* Skip stream if not supported by DIG link encoder. 
*/ + if (!is_dig_link_enc_stream(stream)) + continue; + + if (!stream->link->is_dig_mapping_flexible) + continue; + + for (j = 0; j < prev_state->stream_count; j++) { + struct dc_stream_state *prev_stream = prev_state->streams[j]; + + if (stream == prev_stream && stream->link == prev_stream->link && + prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) { + eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id; + if (is_avail_link_enc(state, eng_id)) + add_link_enc_assignment(state, stream, eng_id); + } + } } } - /* (b) Then assign encoders to mappable endpoints. */ + /* (c) Then assign encoders to remaining mappable endpoints. */ eng_id = ENGINE_ID_UNKNOWN; for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; /* Skip stream if not supported by DIG link encoder. */ - if (!is_dig_link_enc_stream(stream)) + if (!is_dig_link_enc_stream(stream)) { + ASSERT(stream->link->is_dig_mapping_flexible != true); continue; + } /* Mappable endpoints have a flexible mapping to DIG link encoders. */ if (stream->link->is_dig_mapping_flexible) { - eng_id = find_first_avail_link_enc(stream->ctx, state); - update_link_enc_assignment(state, stream, eng_id, true); + struct link_encoder *link_enc = NULL; + + /* Skip if encoder assignment retained in step (b) above. */ + if (stream->link_enc) + continue; + + /* For MST, multiple streams will share the same link / display + * endpoint. These streams should use the same link encoder + * assigned to that endpoint. + */ + link_enc = get_link_enc_used_by_link(state, stream->link); + if (link_enc == NULL) + eng_id = find_first_avail_link_enc(stream->ctx, state); + else + eng_id = link_enc->preferred_engine; + add_link_enc_assignment(state, stream, eng_id); } } + + link_enc_cfg_validate(dc, state); + + /* Update transient assignments. */ + for (i = 0; i < MAX_PIPES; i++) { + dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i] = + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + } + + /* Current state mode will be set to steady once this state committed. 
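+ * Until then, readers that see LINK_ENC_CFG_TRANSIENT on current_state are
+ * served from the transient_assignments copy made just above (see get_assignment()).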
*/ + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } void link_enc_cfg_link_enc_unassign( @@ -226,16 +361,16 @@ void link_enc_cfg_link_enc_unassign( if (stream->link_enc) eng_id = stream->link_enc->preferred_engine; - update_link_enc_assignment(state, stream, eng_id, false); + remove_link_enc_assignment(state, stream, eng_id); } bool link_enc_cfg_is_transmitter_mappable( - struct dc_state *state, + struct dc *dc, struct link_encoder *link_enc) { bool is_mappable = false; enum engine_id eng_id = link_enc->preferred_engine; - struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id); + struct dc_stream_state *stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id); if (stream) is_mappable = stream->link->is_dig_mapping_flexible; @@ -243,73 +378,214 @@ bool link_enc_cfg_is_transmitter_mappable( return is_mappable; } -struct dc_link *link_enc_cfg_get_link_using_link_enc( - struct dc_state *state, +struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc( + struct dc *dc, enum engine_id eng_id) { - struct dc_link *link = NULL; - int stream_idx = -1; + struct dc_stream_state *stream = NULL; int i; - for (i = 0; i < state->stream_count; i++) { - struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); - if (assignment.valid && (assignment.eng_id == eng_id)) { - stream_idx = i; + if ((assignment.valid == true) && (assignment.eng_id == eng_id)) { + stream = assignment.stream; break; } } - if (stream_idx != -1) - link = state->streams[stream_idx]->link; - else - dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id); + return stream; +} + +struct dc_link *link_enc_cfg_get_link_using_link_enc( + struct dc *dc, + enum engine_id eng_id) +{ + struct dc_link *link = NULL; + struct dc_stream_state *stream = NULL; + + stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id); + + if (stream) + link = stream->link; + // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id); return link; } struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( - struct dc_state *state, + struct dc *dc, const struct dc_link *link) { struct link_encoder *link_enc = NULL; struct display_endpoint_id ep_id; - int stream_idx = -1; int i; ep_id = (struct display_endpoint_id) { .link_id = link->link_id, .ep_type = link->ep_type}; - for (i = 0; i < state->stream_count; i++) { - struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; - - if (assignment.valid && - assignment.ep_id.link_id.id == ep_id.link_id.id && - assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id && - assignment.ep_id.link_id.type == ep_id.link_id.type && - assignment.ep_id.ep_type == ep_id.ep_type) { - stream_idx = i; + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); + + if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) { + link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; break; } } - if (stream_idx != -1) - link_enc = state->streams[stream_idx]->link_enc; - return link_enc; } -struct link_encoder *link_enc_cfg_get_next_avail_link_enc( - const struct dc *dc, - const struct dc_state *state) +struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) { struct link_encoder *link_enc = NULL; - enum engine_id eng_id; + enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS]; + int i; + + for (i = 0; i < 
MAX_DIG_LINK_ENCODERS; i++) + encs_assigned[i] = ENGINE_ID_UNKNOWN; + + /* Add assigned encoders to list. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); - eng_id = find_first_avail_link_enc(dc->ctx, state); - if (eng_id != ENGINE_ID_UNKNOWN) - link_enc = dc->res_pool->link_encoders[eng_id - ENGINE_ID_DIGA]; + if (assignment.valid) + encs_assigned[assignment.eng_id - ENGINE_ID_DIGA] = assignment.eng_id; + } + + for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { + if (encs_assigned[i] == ENGINE_ID_UNKNOWN) { + link_enc = dc->res_pool->link_encoders[i]; + break; + } + } return link_enc; } + +struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( + struct dc *dc, + const struct dc_stream_state *stream) +{ + struct link_encoder *link_enc; + + link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link); + + return link_enc; +} + +bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id) +{ + bool is_avail = true; + int i; + + /* Add assigned encoders to list. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); + + if (assignment.valid && assignment.eng_id == eng_id) { + is_avail = false; + break; + } + } + + return is_avail; +} + +bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) +{ + bool is_valid = false; + bool valid_entries = true; + bool valid_stream_ptrs = true; + bool valid_uniqueness = true; + bool valid_avail = true; + bool valid_streams = true; + int i, j; + uint8_t valid_count = 0; + uint8_t dig_stream_count = 0; + int matching_stream_ptrs = 0; + int eng_ids_per_ep_id[MAX_PIPES] = {0}; + + /* (1) No. valid entries same as stream count. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid) + valid_count++; + + if (is_dig_link_enc_stream(state->streams[i])) + dig_stream_count++; + } + if (valid_count != dig_stream_count) + valid_entries = false; + + /* (2) Matching stream ptrs. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid) { + if (assignment.stream == state->streams[i]) + matching_stream_ptrs++; + else + valid_stream_ptrs = false; + } + } + + /* (3) Each endpoint assigned unique encoder. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment_i = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment_i.valid) { + struct display_endpoint_id ep_id_i = assignment_i.ep_id; + + eng_ids_per_ep_id[i]++; + for (j = 0; j < MAX_PIPES; j++) { + struct link_enc_assignment assignment_j = + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j]; + + if (j == i) + continue; + + if (assignment_j.valid) { + struct display_endpoint_id ep_id_j = assignment_j.ep_id; + + if (are_ep_ids_equal(&ep_id_i, &ep_id_j) && + assignment_i.eng_id != assignment_j.eng_id) { + valid_uniqueness = false; + eng_ids_per_ep_id[i]++; + } + } + } + } + } + + /* (4) Assigned encoders not in available pool. 
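+ * An engine id that is both assigned to a stream and still marked available
+ * means the assignment table and availability pool are out of sync.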
*/ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid) { + for (j = 0; j < dc->res_pool->res_cap->num_dig_link_enc; j++) { + if (state->res_ctx.link_enc_cfg_ctx.link_enc_avail[j] == assignment.eng_id) { + valid_avail = false; + break; + } + } + } + } + + /* (5) All streams have valid link encoders. */ + for (i = 0; i < state->stream_count; i++) { + struct dc_stream_state *stream = state->streams[i]; + + if (is_dig_link_enc_stream(stream) && stream->link_enc == NULL) { + valid_streams = false; + break; + } + } + + is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams; + ASSERT(is_valid); + + return is_valid; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 9c51cd09dcf1..cc4b28e94727 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -17,6 +17,7 @@ #include "link_enc_cfg.h" #include "clk_mgr.h" #include "inc/link_dpcd.h" +#include "dccg.h" static uint8_t convert_to_count(uint8_t lttpr_repeater_count) { @@ -61,6 +62,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on) sizeof(state)); } +void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) +{ + if (link != NULL && link->dc->debug.enable_driver_sequence_debug) + core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, + &dp_test_mode, sizeof(dp_test_mode)); +} + void dp_enable_link_phy( struct dc_link *link, enum signal_type signal, @@ -79,7 +87,7 @@ void dp_enable_link_phy( /* Link should always be assigned encoder when en-/disabling. */ if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -111,12 +119,37 @@ void dp_enable_link_phy( link->cur_link_settings = *link_settings; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + /* TODO - DP2.0 HW: notify link rate change here */ + } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); + } +#else if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); - +#endif if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + enable_dp_hpo_output(link, link_settings); + } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (dc_is_dp_sst_signal(signal)) { + link_enc->funcs->enable_dp_output( + link_enc, + link_settings, + clock_source); + } else { + link_enc->funcs->enable_dp_mst_output( + link_enc, + link_settings, + clock_source); + } + } +#else if (dc_is_dp_sst_signal(signal)) { link_enc->funcs->enable_dp_output( link_enc, @@ -128,10 +161,11 @@ void dp_enable_link_phy( link_settings, clock_source); } - +#endif if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); dp_receiver_power_ctrl(link, true); } @@ -206,11 +240,14 @@ void dp_disable_link_phy(struct 
dc_link *link, enum signal_type signal) { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc; +#endif struct link_encoder *link_enc; /* Link should always be assigned encoder when en-/disabling. */ if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -221,18 +258,34 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) if (signal == SIGNAL_TYPE_EDP) { if (link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, false); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) + disable_dp_hpo_output(link, signal); + else + link_enc->funcs->disable_output(link_enc, signal); +#else link_enc->funcs->disable_output(link_enc, signal); +#endif link->dc->hwss.edp_power_control(link, false); } else { if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && + hpo_link_enc) + disable_dp_hpo_output(link, signal); + else + link_enc->funcs->disable_output(link_enc, signal); +#else link_enc->funcs->disable_output(link_enc, signal); - +#endif if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); } + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + /* Clear current link setting.*/ memset(&link->cur_link_settings, 0, sizeof(link->cur_link_settings)); @@ -273,6 +326,14 @@ bool dp_set_hw_training_pattern( case DP_TRAINING_PATTERN_SEQUENCE_4: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; + break; + case DP_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; + break; +#endif default: break; } @@ -282,6 +343,10 @@ bool dp_set_hw_training_pattern( return true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define DC_LOGGER \ + link->ctx->logger +#endif void dp_set_hw_lane_settings( struct dc_link *link, const struct link_training_settings *link_settings, @@ -293,7 +358,20 @@ void dp_set_hw_lane_settings( return; /* call Encoder to set lane settings */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings->link_settings) == + DP_128b_132b_ENCODING) { + link->hpo_dp_link_enc->funcs->set_ffe( + link->hpo_dp_link_enc, + &link_settings->link_settings, + link_settings->lane_settings[0].FFE_PRESET.raw); + } else if (dp_get_link_encoding_format(&link_settings->link_settings) + == DP_8b_10b_ENCODING) { + encoder->funcs->dp_set_lane_settings(encoder, link_settings); + } +#else encoder->funcs->dp_set_lane_settings(encoder, link_settings); +#endif } void dp_set_hw_test_pattern( @@ -304,13 +382,16 @@ void dp_set_hw_test_pattern( { struct encoder_set_dp_phy_pattern_param pattern_param = {0}; struct link_encoder *encoder; +#if defined(CONFIG_DRM_AMD_DC_DCN) + enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings); +#endif /* Access link encoder based on whether it is statically * or dynamically assigned to a link. 
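 * Dynamically mapped links look the encoder up in the link_enc_cfg
 * assignment table; statically mapped links use link->link_enc directly.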
*/ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - encoder = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else encoder = link->link_enc; @@ -319,8 +400,28 @@ void dp_set_hw_test_pattern( pattern_param.custom_pattern_size = custom_pattern_size; pattern_param.dp_panel_mode = dp_get_panel_mode(link); +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (link_encoding_format) { + case DP_128b_132b_ENCODING: + link->hpo_dp_link_enc->funcs->set_link_test_pattern( + link->hpo_dp_link_enc, &pattern_param); + break; + case DP_8b_10b_ENCODING: + ASSERT(encoder); + encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); + break; + default: + DC_LOG_ERROR("%s: Unknown link encoding format.", __func__); + break; + } +#else encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); +#endif + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); } +#if defined(CONFIG_DRM_AMD_DC_DCN) +#undef DC_LOGGER +#endif void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, @@ -338,7 +439,7 @@ void dp_retrain_link_dp_test(struct dc_link *link, pipes[i].stream->link == link) { udelay(100); - pipes[i].stream_res.stream_enc->funcs->dp_blank( + pipes[i].stream_res.stream_enc->funcs->dp_blank(link, pipes[i].stream_res.stream_enc); /* disable any test pattern that might be active */ @@ -351,9 +452,10 @@ void dp_retrain_link_dp_test(struct dc_link *link, if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only) (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio); - link->link_enc->funcs->disable_output( - link->link_enc, - SIGNAL_TYPE_DISPLAY_PORT); + if (link->link_enc) + link->link_enc->funcs->disable_output( + link->link_enc, + SIGNAL_TYPE_DISPLAY_PORT); /* Clear current link setting. */ memset(&link->cur_link_settings, 0, @@ -468,7 +570,12 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) + && !is_dp_128b_132b_signal(pipe_ctx)) { +#else if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { +#endif DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -495,13 +602,22 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) /* disable DSC in stream encoder */ if (dc_is_dp_signal(stream->signal)) { - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( - pipe_ctx->stream_res.stream_enc, - OPTC_DSC_DISABLED, 0, 0); - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, false, NULL); - } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else +#endif + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( + pipe_ctx->stream_res.stream_enc, + OPTC_DSC_DISABLED, 0, 0); + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); + } } /* disable DSC block */ @@ -535,7 +651,16 @@ out: return result; } -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) +/* + * For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled; + * hence the PPS info packet update needs to use a frame update instead of an immediate update. + * The parameter immediate_update was added for this purpose. + * The decision to use frame update is hard-coded in function dp_update_dsc_config(), + * which is the only place where a "false" would be passed in for the immediate_update parameter. + * + * immediate_update is only applicable when DSC is enabled.
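+ * When DSC is being disabled the PPS packet is simply turned off, so those
+ * callers pass immediate_update = true.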
+ */ +bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; struct dc_stream_state *stream = pipe_ctx->stream; @@ -562,16 +687,35 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); if (dc_is_dp_signal(stream->signal)) { DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, - true, - &dsc_packed_pps[0]); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + else +#endif + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); } } else { /* disable DSC PPS in stream encoder */ if (dc_is_dp_signal(stream->signal)) { - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, false, NULL); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else +#endif + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); } } @@ -589,7 +733,171 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) return false; dp_set_dsc_on_stream(pipe_ctx, true); - dp_set_dsc_pps_sdp(pipe_ctx, true); + dp_set_dsc_pps_sdp(pipe_ctx, true, false); return true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +#undef DC_LOGGER +#define DC_LOGGER \ + link->ctx->logger + +static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) +{ + switch (link->link_enc->transmitter) { + case TRANSMITTER_UNIPHY_A: + return PHYD32CLKA; + case TRANSMITTER_UNIPHY_B: + return PHYD32CLKB; + case TRANSMITTER_UNIPHY_C: + return PHYD32CLKC; + case TRANSMITTER_UNIPHY_D: + return PHYD32CLKD; + case TRANSMITTER_UNIPHY_E: + return PHYD32CLKE; + default: + return PHYD32CLKA; + } +} + +void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings) +{ + const struct dc *dc = link->dc; + enum phyd32clk_clock_source phyd32clk; + + /* Enable PHY PLL at target bit rate + * UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz) + * UBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz) + * UHBR20 = 20Gbps (SYMCLK32 = 625MHz) + */ + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + switch (link_settings->link_rate) { + case LINK_RATE_UHBR10: + dm_set_phyd32clk(dc->ctx, 312500); + break; + case LINK_RATE_UHBR13_5: + dm_set_phyd32clk(dc->ctx, 412875); + break; + case LINK_RATE_UHBR20: + dm_set_phyd32clk(dc->ctx, 625000); + break; + default: + return; + } + } else { + /* DP2.0 HW: call transmitter control to enable PHY */ + link->hpo_dp_link_enc->funcs->enable_link_phy( + link->hpo_dp_link_enc, + link_settings, + link->link_enc->transmitter); + } + + /* DCCG muxing and DTBCLK DTO */ + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->res_pool->dccg->funcs->set_physymclk( + dc->res_pool->dccg, + link->link_enc_hw_inst, + PHYSYMCLK_FORCE_SRC_PHYD32CLK, + true); + + phyd32clk = get_phyd32clk_src(link); + 
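+ /* Route the PHY's divide-by-32 clock (PHYD32CLK) to this HPO link encoder. */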
dc->res_pool->dccg->funcs->enable_symclk32_le( + dc->res_pool->dccg, + link->hpo_dp_link_enc->inst, + phyd32clk); + link->hpo_dp_link_enc->funcs->link_enable( + link->hpo_dp_link_enc, + link_settings->lane_count); + } +} + +void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal) +{ + const struct dc *dc = link->dc; + + link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc); + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->res_pool->dccg->funcs->disable_symclk32_le( + dc->res_pool->dccg, + link->hpo_dp_link_enc->inst); + + dc->res_pool->dccg->funcs->set_physymclk( + dc->res_pool->dccg, + link->link_enc_hw_inst, + PHYSYMCLK_FORCE_SRC_SYMCLK, + false); + + dm_set_phyd32clk(dc->ctx, 0); + } else { + /* DP2.0 HW: call transmitter control to disable PHY */ + link->hpo_dp_link_enc->funcs->disable_link_phy( + link->hpo_dp_link_enc, + signal); + } +} + +void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct pipe_ctx *odm_pipe; + int odm_combine_num_segments = 1; + enum phyd32clk_clock_source phyd32clk; + + if (enable) { + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_combine_num_segments++; + + dc->res_pool->dccg->funcs->set_dpstreamclk( + dc->res_pool->dccg, + DTBCLK0, + pipe_ctx->stream_res.tg->inst); + + phyd32clk = get_phyd32clk_src(stream->link); + dc->res_pool->dccg->funcs->enable_symclk32_se( + dc->res_pool->dccg, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst, + phyd32clk); + + dc->res_pool->dccg->funcs->set_dtbclk_dto( + dc->res_pool->dccg, + pipe_ctx->stream_res.tg->inst, + stream->phy_pix_clk, + odm_combine_num_segments, + &stream->timing); + } else { + dc->res_pool->dccg->funcs->set_dtbclk_dto( + dc->res_pool->dccg, + pipe_ctx->stream_res.tg->inst, + 0, + 0, + &stream->timing); + dc->res_pool->dccg->funcs->disable_symclk32_se( + dc->res_pool->dccg, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst); + dc->res_pool->dccg->funcs->set_dpstreamclk( + dc->res_pool->dccg, + REFCLK, + pipe_ctx->stream_res.tg->inst); + } +} + +void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link) +{ + const struct dc *dc = link->dc; + struct dc_state *state = dc->current_state; + uint8_t i; + + for (i = 0; i < MAX_PIPES; i++) { + if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc && + state->res_ctx.pipe_ctx[i].stream && + state->res_ctx.pipe_ctx[i].stream->link == link && + !state->res_ctx.pipe_ctx[i].stream->dpms_off) { + setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false); + } + } +} + +#undef DC_LOGGER +#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a60396d5be44..adc656fc4848 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -41,6 +41,8 @@ #include "set_mode_types.h" #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" +#include "link_enc_cfg.h" +#include "dc_link_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" @@ -347,6 +349,29 @@ bool resource_construct( } #if defined(CONFIG_DRM_AMD_DC_DCN) + pool->hpo_dp_stream_enc_count = 0; + if (create_funcs->create_hpo_dp_stream_encoder) { + for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) { + pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx); + if (pool->hpo_dp_stream_enc[i] == NULL) + DC_ERR("DC: 
failed to create HPO DP stream encoder!\n"); + pool->hpo_dp_stream_enc_count++; + + } + } + + pool->hpo_dp_link_enc_count = 0; + if (create_funcs->create_hpo_dp_link_encoder) { + for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) { + pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx); + if (pool->hpo_dp_link_enc[i] == NULL) + DC_ERR("DC: failed to create HPO DP link encoder!\n"); + pool->hpo_dp_link_enc_count++; + } + } +#endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) for (i = 0; i < caps->num_mpc_3dlut; i++) { pool->mpc_lut[i] = dc_create_3dlut_func(); if (pool->mpc_lut[i] == NULL) @@ -1665,6 +1690,22 @@ static void update_stream_engine_usage( } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void update_hpo_dp_stream_engine_usage( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, + bool acquired) +{ + int i; + + for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { + if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc) + res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired; + } +} +#endif + /* TODO: release audio object */ void update_audio_usage( struct resource_context *res_ctx, @@ -1709,6 +1750,26 @@ static int acquire_first_free_pipe( return -1; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + int i; + + for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { + if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] && + pool->hpo_dp_stream_enc[i]) { + + return pool->hpo_dp_stream_enc[i]; + } + } + + return NULL; +} +#endif + static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1799,6 +1860,15 @@ enum dc_status dc_remove_stream_from_ctx( if (dc->res_pool->funcs->link_enc_unassign) dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(del_pipe)) { + update_hpo_dp_stream_engine_usage( + &new_ctx->res_ctx, dc->res_pool, + del_pipe->stream_res.hpo_dp_stream_enc, + false); + } +#endif + if (del_pipe->stream_res.audio) update_audio_usage( &new_ctx->res_ctx, @@ -2051,6 +2121,31 @@ enum dc_status resource_map_pool_resources( pipe_ctx->stream_res.stream_enc, true); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Allocate DP HPO Stream Encoder based on signal, hw capabilities + * and link settings + */ + if (dc_is_dp_signal(stream->signal) && + dc->caps.dp_hpo) { + struct dc_link_settings link_settings = {0}; + + decide_link_settings(stream, &link_settings); + if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) { + pipe_ctx->stream_res.hpo_dp_stream_enc = + find_first_free_match_hpo_dp_stream_enc_for_link( + &context->res_ctx, pool, stream); + + if (!pipe_ctx->stream_res.hpo_dp_stream_enc) + return DC_NO_STREAM_ENC_RESOURCE; + + update_hpo_dp_stream_engine_usage( + &context->res_ctx, pool, + pipe_ctx->stream_res.hpo_dp_stream_enc, + true); + } + } +#endif + /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && @@ -2147,7 +2242,7 @@ enum dc_status dc_validate_global_state( * Update link encoder to stream assignment. * TODO: Split out reason allocation from validation. 
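 * Note: assignment only runs for full validation; fast validation skips it.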
*/ - if (dc->res_pool->funcs->link_encs_assign) + if (dc->res_pool->funcs->link_encs_assign && fast_validate == false) dc->res_pool->funcs->link_encs_assign( dc, new_ctx, new_ctx->streams, new_ctx->stream_count); #endif @@ -2726,9 +2821,24 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc) return true; - /* DIG link encoder resource assignment for stream changed. */ - if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc) return true; +#endif + + /* DIG link encoder resource assignment for stream changed. */ + if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) { + bool need_reprogram = false; + struct dc *dc = pipe_ctx_old->stream->ctx->dc; + enum link_enc_cfg_mode mode = dc->current_state->res_ctx.link_enc_cfg_ctx.mode; + + dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; + if (link_enc_cfg_get_link_enc_used_by_stream(dc, pipe_ctx_old->stream) != pipe_ctx->stream->link_enc) + need_reprogram = true; + dc->current_state->res_ctx.link_enc_cfg_ctx.mode = mode; + + return need_reprogram; + } return false; } @@ -2871,7 +2981,8 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) res = DC_FAIL_CONTROLLER_VALIDATE; if (res == DC_OK) { - if (!link->link_enc->funcs->validate_output_with_stream( + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + !link->link_enc->funcs->validate_output_with_stream( link->link_enc, stream)) res = DC_FAIL_ENC_VALIDATE; } @@ -2975,3 +3086,22 @@ void get_audio_check(struct audio_info *aud_modes, } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( + const struct resource_pool *pool) +{ + uint8_t i; + struct hpo_dp_link_encoder *enc = NULL; + + ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS); + + for (i = 0; i < pool->hpo_dp_link_enc_count; i++) { + if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) { + enc = pool->hpo_dp_link_enc[i]; + break; + } + } + + return enc; +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3ab52d9a82cf..e5dcbee6e672 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ /* forward declaration */ struct aux_payload; -#define DC_VER "3.2.149" +#define DC_VER "3.2.154" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -183,6 +183,9 @@ struct dc_caps { unsigned int cursor_cache_size; struct dc_plane_cap planes[MAX_PLANES]; struct dc_color_caps color; +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool dp_hpo; +#endif bool vbios_lttpr_aware; bool vbios_lttpr_enable; }; @@ -289,7 +292,15 @@ struct dc_cap_funcs { struct link_training_settings; - +#if defined(CONFIG_DRM_AMD_DC_DCN) +union allow_lttpr_non_transparent_mode { + struct { + bool DP1_4A : 1; + bool DP2_0 : 1; + } bits; + unsigned char raw; +}; +#endif /* Structure to hold configuration flags set by dm at dc creation. 
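 * These are one-time settings, not expected to change while dc exists.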
*/ struct dc_config { bool gpu_vm_support; @@ -302,7 +313,11 @@ struct dc_config { bool edp_no_power_sequencing; bool force_enum_edp; bool forced_clocks; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode; +#else bool allow_lttpr_non_transparent_mode; +#endif bool multi_mon_pp_mclk_switch; bool disable_dmcu; bool enable_4to1MPC; @@ -456,6 +471,8 @@ union mem_low_power_enable_options { bool cm: 1; bool mpc: 1; bool optc: 1; + bool vpg: 1; + bool afmt: 1; } bits; uint32_t u32All; }; @@ -614,16 +631,19 @@ struct dc_debug_options { bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; bool enable_dram_clock_change_one_display_vactive; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - remove once tested */ + bool legacy_dp2_lt; +#endif union mem_low_power_enable_options enable_mem_low_power; bool force_vblank_alignment; /* Enable dmub aux for legacy ddc */ bool enable_dmub_aux_for_legacy_ddc; bool optimize_edp_link_rate; /* eDP ILR */ - /* force enable edp FEC */ - bool force_enable_edp_fec; /* FEC/PSR1 sequence enable delay in 100us */ uint8_t fec_enable_delay_in100us; + bool enable_driver_sequence_debug; #if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; bool enable_sw_cntl_psr; @@ -1146,6 +1166,12 @@ struct dpcd_caps { struct dc_lttpr_caps lttpr_caps; struct psr_caps psr_caps; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; + union dp_main_line_channel_coding_cap channel_coding_cap; + union dp_sink_video_fallback_formats fallback_formats; + union dp_fec_capability1 fec_cap1; +#endif }; union dpcd_sink_ext_caps { @@ -1337,7 +1363,7 @@ void dc_hardware_release(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); #if defined(CONFIG_DRM_AMD_DC_DCN) -void dc_z10_restore(struct dc *dc); +void dc_z10_restore(const struct dc *dc); void dc_z10_save_init(struct dc *dc); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 4f54bde1bb1c..a5e798b5da79 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -53,7 +53,17 @@ enum dc_link_rate { LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane +#if defined(CONFIG_DRM_AMD_DC_DCN) + LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane + /* Starting from DP2.0 link rate enum directly represents actual + * link rate value in unit of 10 mbps + */ + LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane + LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane + LINK_RATE_UHBR20 = 2000, // UHBR10 - 20.0 Gbps/Lane +#else LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane +#endif }; enum dc_link_spread { @@ -90,17 +100,47 @@ enum dc_post_cursor2 { POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3, }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dc_dp_ffe_preset_level { + DP_FFE_PRESET_LEVEL0 = 0, + DP_FFE_PRESET_LEVEL1, + DP_FFE_PRESET_LEVEL2, + DP_FFE_PRESET_LEVEL3, + DP_FFE_PRESET_LEVEL4, + DP_FFE_PRESET_LEVEL5, + DP_FFE_PRESET_LEVEL6, + DP_FFE_PRESET_LEVEL7, + DP_FFE_PRESET_LEVEL8, + DP_FFE_PRESET_LEVEL9, + DP_FFE_PRESET_LEVEL10, + DP_FFE_PRESET_LEVEL11, + DP_FFE_PRESET_LEVEL12, + DP_FFE_PRESET_LEVEL13, + DP_FFE_PRESET_LEVEL14, + DP_FFE_PRESET_LEVEL15, + DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15, +}; +#endif + enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_1 = 0, 
DP_TRAINING_PATTERN_SEQUENCE_2, DP_TRAINING_PATTERN_SEQUENCE_3, DP_TRAINING_PATTERN_SEQUENCE_4, DP_TRAINING_PATTERN_VIDEOIDLE, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_128b_132b_TPS1, + DP_128b_132b_TPS2, + DP_128b_132b_TPS2_CDS, +#endif }; enum dp_link_encoding { DP_UNKNOWN_ENCODING = 0, DP_8b_10b_ENCODING = 1, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_128b_132b_ENCODING = 2, +#endif }; struct dc_link_settings { @@ -112,10 +152,26 @@ struct dc_link_settings { bool dpcd_source_device_specific_field_support; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union dc_dp_ffe_preset { + struct { + uint8_t level : 4; + uint8_t reserved : 1; + uint8_t no_preshoot : 1; + uint8_t no_deemphasis : 1; + uint8_t method2 : 1; + } settings; + uint8_t raw; +}; +#endif + struct dc_lane_settings { enum dc_voltage_swing VOLTAGE_SWING; enum dc_pre_emphasis PRE_EMPHASIS; enum dc_post_cursor2 POST_CURSOR2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dc_dp_ffe_preset FFE_PRESET; +#endif }; struct dc_link_training_settings { @@ -127,6 +183,9 @@ struct dc_link_training_overrides { enum dc_voltage_swing *voltage_swing; enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dc_dp_ffe_preset *ffe_preset; +#endif uint16_t *cr_pattern_time; uint16_t *eq_pattern_time; @@ -140,6 +199,16 @@ struct dc_link_training_overrides { bool *fec_enable; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union payload_table_update_status { + struct { + uint8_t VC_PAYLOAD_TABLE_UPDATED:1; + uint8_t ACT_HANDLED:1; + } bits; + uint8_t raw; +}; +#endif + union dpcd_rev { struct { uint8_t MINOR:4; @@ -227,7 +296,14 @@ union lane_align_status_updated { struct { uint8_t INTERLANE_ALIGN_DONE:1; uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1; + uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1; + uint8_t LT_FAILED_128b_132b:1; + uint8_t RESERVED:1; +#else uint8_t RESERVED:4; +#endif uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1; uint8_t LINK_STATUS_UPDATED:1; } bits; @@ -240,6 +316,12 @@ union lane_adjust { uint8_t PRE_EMPHASIS_LANE:2; uint8_t RESERVED:4; } bits; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct { + uint8_t PRESET_VALUE :4; + uint8_t RESERVED :4; + } tx_ffe; +#endif uint8_t raw; }; @@ -269,6 +351,12 @@ union dpcd_training_lane { uint8_t MAX_PRE_EMPHASIS_REACHED:1; uint8_t RESERVED:2; } bits; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct { + uint8_t PRESET_VALUE :4; + uint8_t RESERVED :4; + } tx_ffe; +#endif uint8_t raw; }; @@ -551,12 +639,18 @@ union test_response { union phy_test_pattern { struct { +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* This field is 7 bits for DP2.0 */ + uint8_t PATTERN :7; + uint8_t RESERVED :1; +#else /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 * and 3 bits for DP1.2. */ uint8_t PATTERN :3; /* BY speci, bit7:2 is 0 for DP1.1. 
*/
*/ uint8_t RESERVED :5; +#endif } bits; uint8_t raw; }; @@ -634,7 +728,14 @@ union dpcd_fec_capability { uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t BIT_ERROR_COUNT_CAPABLE:1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1; + uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1; + uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1; + uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1; +#else uint8_t RESERVED:4; +#endif } bits; uint8_t raw; }; @@ -758,4 +859,125 @@ struct psr_caps { bool psr_exit_link_training_required; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006 +#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 +#define DP_FEC_CAPABILITY_1 0x091 +#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 +#define DP_DSC_CONFIGURATION 0x161 +#define DP_PHY_SQUARE_PATTERN 0x249 +#define DP_128b_132b_SUPPORTED_LINK_RATES 0x2215 +#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216 +#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 +#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250 +#define DP_DSC_SUPPORT_AND_DECODER_COUNT 0x2260 +#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 +# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0) +# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1) +# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1 +# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5) +# define DP_DSC_DECODER_COUNT_SHIFT 5 +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xF0006 +#define DP_PHY_REPEATER_128b_132b_RATES 0xF0007 +#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xF0022 +#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) +/* TODO - Use DRM header to replace above once available */ + +union dp_main_line_channel_coding_cap { + struct { + uint8_t DP_8b_10b_SUPPORTED :1; + uint8_t DP_128b_132b_SUPPORTED :1; + uint8_t RESERVED :6; + } bits; + uint8_t raw; +}; + +union dp_main_link_channel_coding_lttpr_cap { + struct { + uint8_t DP_128b_132b_SUPPORTED :1; + uint8_t RESERVED :7; + } bits; + uint8_t raw; +}; + +union dp_128b_132b_supported_link_rates { + struct { + uint8_t UHBR10 :1; + uint8_t UHBR20 :1; + uint8_t UHBR13_5:1; + uint8_t RESERVED:5; + } bits; + uint8_t raw; +}; + +union dp_128b_132b_supported_lttpr_link_rates { + struct { + uint8_t UHBR10 :1; + uint8_t UHBR13_5:1; + uint8_t UHBR20 :1; + uint8_t RESERVED:5; + } bits; + uint8_t raw; +}; + +union dp_sink_video_fallback_formats { + struct { + uint8_t dp_1024x768_60Hz_24bpp_support :1; + uint8_t dp_1280x720_60Hz_24bpp_support :1; + uint8_t dp_1920x1080_60Hz_24bpp_support :1; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +union dp_fec_capability1 { + struct { + uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1; + uint8_t RESERVED :7; + } bits; + uint8_t raw; +}; + +struct dp_color_depth_caps { + uint8_t support_6bpc :1; + uint8_t support_8bpc :1; + uint8_t support_10bpc :1; + uint8_t support_12bpc :1; + uint8_t support_16bpc :1; + uint8_t RESERVED :3; +}; + +struct dp_encoding_format_caps { + uint8_t support_rgb :1; + uint8_t support_ycbcr444:1; + uint8_t support_ycbcr422:1; + uint8_t support_ycbcr420:1; + uint8_t RESERVED :4; +}; + +union dp_dfp_cap_ext { + struct { + uint8_t supported; + uint8_t max_pixel_rate_in_mps[2]; + uint8_t max_video_h_active_width[2]; + uint8_t max_video_v_active_height[2]; + struct dp_encoding_format_caps encoding_format_caps; + struct dp_color_depth_caps rgb_color_depth_caps; + struct dp_color_depth_caps 
ycbcr444_color_depth_caps; + struct dp_color_depth_caps ycbcr422_color_depth_caps; + struct dp_color_depth_caps ycbcr420_color_depth_caps; + } fields; + uint8_t raw[12]; +}; + +union dp_128b_132b_training_aux_rd_interval { + struct { + uint8_t VALUE :7; + uint8_t UNIT :1; + } bits; + uint8_t raw; +}; +#endif + #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 16cc76ce3739..684713b2cff7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -51,7 +51,6 @@ struct dc_dsc_policy { int min_slice_height; // Must not be less than 8 uint32_t max_target_bpp; uint32_t min_target_bpp; - uint32_t preferred_bpp_x16; bool enable_dsc_when_not_needed; }; @@ -81,6 +80,16 @@ bool dc_dsc_compute_config( uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp); +uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( + const struct dc_crtc_timing *timing, + const int num_slices_h, + const bool is_dp); + +/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM, + * and DM can choose to OVERRIDE the limitation on CASE BY CASE basis. + * Hardware/specs limitation should not be writable by DM. + * It should be decoupled from DM specific policy and named differently. + */ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy); diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 83845d006c54..56340a176554 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -45,6 +45,10 @@ struct dc_link_status { struct link_mst_stream_allocation { /* DIG front */ const struct stream_encoder *stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* HPO DP Stream Encoder */ + const struct hpo_dp_stream_encoder *hpo_dp_stream_enc; +#endif /* associate DRM payload table with DC stream encoder */ uint8_t vcp_id; /* number of slots required for the DP stream in transport packet */ @@ -150,6 +154,9 @@ struct dc_link { struct panel_cntl *panel_cntl; struct link_encoder *link_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_enc; +#endif struct graphics_object_id link_id; /* Endpoint type distinguishes display endpoints which do not have entries * in the BIOS connector table from those that do. Helps when tracking link @@ -296,7 +303,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); * false - no change in Downstream port status. No further action required * from DM. */ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, - union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss); + union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work); /* * On eDP links this function call will stall until T12 has elapsed. 
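* (Editor's note: T12 is the eDP panel power-cycle delay, the minimum time
* the panel must remain powered off before it may be powered back on, so
* this wait serializes panel power sequencing.)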
@@ -305,9 +313,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, */ bool dc_link_wait_for_t12(struct dc_link *link); -enum dc_status read_hpd_rx_irq_data( - struct dc_link *link, - union hpd_irq_data *irq_data); +void dc_link_dp_handle_automated_test(struct dc_link *link); +void dc_link_dp_handle_link_loss(struct dc_link *link); +bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link); struct dc_sink_init_data; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index c1532930169b..3c109c805447 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -395,9 +395,27 @@ struct dc_lttpr_caps { uint8_t max_link_rate; uint8_t phy_repeater_cnt; uint8_t max_ext_timeout; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; + union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; +#endif uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct dc_dongle_dfp_cap_ext { + bool supported; + uint16_t max_pixel_rate_in_mps; + uint16_t max_video_h_active_width; + uint16_t max_video_v_active_height; + struct dp_encoding_format_caps encoding_format_caps; + struct dp_color_depth_caps rgb_color_depth_caps; + struct dp_color_depth_caps ycbcr444_color_depth_caps; + struct dp_color_depth_caps ycbcr422_color_depth_caps; + struct dp_color_depth_caps ycbcr420_color_depth_caps; +}; +#endif + struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ enum display_dongle_type dongle_type; @@ -411,6 +429,9 @@ struct dc_dongle_caps { bool is_dp_hdmi_ycbcr420_converter; uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_dongle_dfp_cap_ext dfp_cap_ext; +#endif }; /* Scaling format */ enum scaling_transformation { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index 456fadbbfac7..b699d1b2ba83 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -96,6 +96,22 @@ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) +#define ABM_DCN302_REG_LIST(id)\ + ABM_COMMON_REG_LIST_DCE_BASE(), \ + SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ + SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \ + SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \ + SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \ + SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \ + SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \ + SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \ + SRI(BL1_PWM_USER_LEVEL, ABM, id), \ + SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ + SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ + SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ + SRI(DC_ABM1_ACE_THRES_12, ABM, id), \ + NBIO_SR(BIOS_SCRATCH_2) + #define ABM_DCN30_REG_LIST(id)\ ABM_COMMON_REG_LIST_DCE_BASE(), \ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index e14f99b4b0c3..5666543f095b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,7 +42,7 @@ #define DC_LOGGER \ engine->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ +#define DC_TRACE_LEVEL_MESSAGE(...) 
do { } while (0) #define IS_DC_I2CAUX_LOGGING_ENABLED() (false) #define LOG_FLAG_Error_I2cAux LOG_ERROR #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX @@ -76,7 +76,7 @@ enum { #define DEFAULT_AUX_ENGINE_MULT 0 #define DEFAULT_AUX_ENGINE_LENGTH 69 -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ +#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0) static void release_engine( struct dce_aux *engine) @@ -689,8 +689,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, enum aux_return_code_type operation_result; bool retry_on_defer = false; struct ddc *ddc_pin = ddc->ddc_pin; - struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + struct dce_aux *aux_engine = NULL; + struct aux_engine_dce110 *aux110 = NULL; uint32_t defer_time_in_ms = 0; int aux_ack_retries = 0, @@ -699,6 +699,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, aux_timeout_retries = 0, aux_invalid_reply_retries = 0; + if (ddc_pin) { + aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + aux110 = FROM_AUX_ENGINE(aux_engine); + } + if (!payload->reply) { payload_reply = false; payload->reply = &reply; @@ -765,7 +770,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER"); /* polling_timeout_period is in us */ - defer_time_in_ms += aux110->polling_timeout_period / 1000; + if (aux110) + defer_time_in_ms += aux110->polling_timeout_period / 1000; ++aux_defer_retries; fallthrough; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 8d4263da59f2..779bc92a2968 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -919,6 +919,7 @@ static void dce110_stream_encoder_stop_dp_info_packets( } static void dce110_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); @@ -967,6 +968,7 @@ static void dce110_stream_encoder_dp_blank( /* output video stream to link encoder */ static void dce110_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 62d595ded866..af3e68d3e747 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -46,6 +46,7 @@ #include "transform.h" #include "stream_encoder.h" #include "link_encoder.h" +#include "link_enc_cfg.h" #include "link_hwss.h" #include "dc_link_dp.h" #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -57,7 +58,8 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" - +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" @@ -1108,11 +1110,23 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) clk_mgr->funcs->enable_pme_wa(clk_mgr); /* un-mute audio */ /* TODO: audio should be per stream rather than per link */ - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + 
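/* Editor's note: this fork is the recurring dispatch pattern of the series:
 * 128b/132b (DP 2.0) streams are driven through the new HPO DP stream
 * encoder in stream_res, everything else through the legacy DIO stream
 * encoder, so each hw-sequencer touchpoint branches on
 * is_dp_128b_132b_signal().
 */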
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.hpo_dp_stream_enc, false); + else + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, false); +#else + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.stream_enc, false); +#endif if (pipe_ctx->stream_res.audio) pipe_ctx->stream_res.audio->enabled = true; } + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM); } void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) @@ -1129,14 +1143,32 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.hpo_dp_stream_enc, true); + else + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.stream_enc, true); +#else pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); +#endif if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( + pipe_ctx->stream_res.hpo_dp_stream_enc); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( + pipe_ctx->stream_res.stream_enc); +#else pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); +#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1151,6 +1183,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) * stream->stream_engine_id); */ } + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM); } void dce110_disable_stream(struct pipe_ctx *pipe_ctx) @@ -1158,6 +1193,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dc *dc = pipe_ctx->stream->ctx->dc; + struct link_encoder *link_enc = NULL; if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1166,17 +1202,48 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets( + pipe_ctx->stream_res.hpo_dp_stream_enc); + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#else if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#endif pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.stream_enc); dc->hwss.disable_audio_stream(pipe_ctx); - link->link_enc->funcs->connect_dig_be_to_fe( + /* Link encoder may have been dynamically assigned to non-physical display endpoint. 
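* For such endpoints link->link_enc is not fixed at creation time, so the
* encoder actually in use must be looked up through the assignment table
* (link_enc_cfg_get_link_enc_used_by_link) before the DIG back end is
* disconnected below.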
*/ + if (link->ep_type == DISPLAY_ENDPOINT_PHY) + link_enc = link->link_enc; + else if (dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + ASSERT(link_enc); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable( + pipe_ctx->stream_res.hpo_dp_stream_enc); + setup_dp_hpo_stream(pipe_ctx, false); + /* TODO - DP2.0 HW: unmap stream from link encoder here */ + } else { + if (link_enc) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + } +#else + if (link_enc) + link_enc->funcs->connect_dig_be_to_fe( link->link_enc, pipe_ctx->stream_res.stream_enc->id, false); - +#endif + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1192,7 +1259,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; if (dc_is_dp_signal(pipe_ctx->stream->signal)) - pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { hws->funcs.edp_backlight_control(link, true); @@ -1210,8 +1277,16 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank( + pipe_ctx->stream_res.hpo_dp_stream_enc); + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { +#else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { - pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc); +#endif + pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) { /* @@ -1436,6 +1511,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc *dc) { struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; struct drr_params params = {0}; unsigned int event_triggers = 0; struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; @@ -1451,10 +1527,23 @@ static enum dc_status apply_single_controller_ctx_to_hw( build_audio_output(context, pipe_ctx, &audio_output); if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( + pipe_ctx->stream_res.hpo_dp_stream_enc, + pipe_ctx->stream_res.audio->inst, + &pipe_ctx->stream->audio_info); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.audio->inst, + &pipe_ctx->stream->audio_info); +#else pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.audio->inst, &pipe_ctx->stream->audio_info); +#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( pipe_ctx->stream_res.stream_enc, @@ -1469,10 +1558,18 @@ static enum dc_status apply_single_controller_ctx_to_hw( &pipe_ctx->stream->audio_info); } - /* */ - /* Do not touch stream timing on seamless boot optimization. 
*/ - if (!pipe_ctx->stream->apply_seamless_boot_optimization) - hws->funcs.enable_stream_timing(pipe_ctx, context, dc); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. + * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) +#endif + /* */ + /* Do not touch stream timing on seamless boot optimization. */ + if (!pipe_ctx->stream->apply_seamless_boot_optimization) + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); if (hws->funcs.setup_vupdate_interrupt) hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); @@ -1499,6 +1596,9 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, @@ -1526,6 +1626,18 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. + * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) { + if (!pipe_ctx->stream->apply_seamless_boot_optimization) + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); + } +#endif + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; @@ -1537,29 +1649,37 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { - int i; - - /* do not know BIOS back-front mapping, simply blank all. 
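(editor's note: this blanket blank-all loop is dropped in favor of the
targeted walk below, which reads each enabled link's DIG front end via
get_dig_frontend and blanks only the stream encoder actually driving it)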
It will not - * hurt for non-DP - */ - for (i = 0; i < dc->res_pool->stream_enc_count; i++) { - dc->res_pool->stream_enc[i]->funcs->dp_blank( - dc->res_pool->stream_enc[i]); - } + int i, j; for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; if ((signal == SIGNAL_TYPE_EDP) || - (signal == SIGNAL_TYPE_DISPLAY_PORT)) + (signal == SIGNAL_TYPE_DISPLAY_PORT)) { + if (dc->links[i]->link_enc->funcs->get_dig_frontend && + dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) { + unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend( + dc->links[i]->link_enc); + + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], + dc->res_pool->stream_enc[j]); + break; + } + } + } + if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); + } if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; - dc->links[i]->link_enc->funcs->disable_output( - dc->links[i]->link_enc, signal); + if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY) + dc->links[i]->link_enc->funcs->disable_output( + dc->links[i]->link_enc, signal); dc->links[i]->link_status.link_active = false; memset(&dc->links[i]->cur_link_settings, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index df8a7718a85f..c5e2b4f138fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -466,6 +466,71 @@ void dcn10_log_hw_state(struct dc *dc, log_mpc_crc(dc, log_ctx); + { + int hpo_dp_link_enc_count = 0; + + if (pool->hpo_dp_stream_enc_count > 0) { + DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n"); + for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { + struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0}; + struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i]; + + if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) { + hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state); + + DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n", + hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0, + hpo_dp_se_state.stream_enc_enabled, + hpo_dp_se_state.otg_inst, + (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" : + ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" : + (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"), + (hpo_dp_se_state.component_depth == 0) ? 6 : + ((hpo_dp_se_state.component_depth == 1) ? 8 : + (hpo_dp_se_state.component_depth == 2) ? 
10 : 12), + hpo_dp_se_state.vid_stream_enabled, + hpo_dp_se_state.sdp_enabled, + hpo_dp_se_state.compressed_format, + hpo_dp_se_state.mapped_to_link_enc); + } + } + + DTN_INFO("\n"); + } + + /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */ + for (i = 0; i < dc->link_count; i++) + if (dc->links[i]->hpo_dp_link_enc) + hpo_dp_link_enc_count++; + + if (hpo_dp_link_enc_count) { + DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n"); + + for (i = 0; i < dc->link_count; i++) { + struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc; + struct hpo_dp_link_enc_state hpo_dp_le_state = {0}; + + if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) { + hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state); + DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n", + hpo_dp_link_enc->inst, + hpo_dp_le_state.link_enc_enabled, + (hpo_dp_le_state.link_mode == 0) ? "TPS1" : + (hpo_dp_le_state.link_mode == 1) ? "TPS2" : + (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST", + hpo_dp_le_state.lane_count, + hpo_dp_le_state.stream_src[0], + hpo_dp_le_state.slot_count[0], + hpo_dp_le_state.vc_rate_x[0], + hpo_dp_le_state.vc_rate_y[0]); + DTN_INFO("\n"); + } + } + + DTN_INFO("\n"); + } + } + DTN_INFO_END(); } @@ -1424,7 +1489,7 @@ void dcn10_init_hw(struct dc *dc) for (j = 0; j < dc->res_pool->stream_enc_count; j++) { if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank( + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], dc->res_pool->stream_enc[j]); break; } @@ -1522,7 +1587,7 @@ void dcn10_power_down_on_boot(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->link_enc->funcs->is_dig_enabled && + if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && dc->hwss.power_down) { dc->hwss.power_down(dc); @@ -3176,13 +3241,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) { - struct pipe_ctx *test_pipe; + struct pipe_ctx *test_pipe, *split_pipe; const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; - const struct rect *r1 = &scl_data->recout, *r2; - int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; + struct rect r1 = scl_data->recout, r2, r2_half; + int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; int cur_layer = pipe_ctx->plane_state->layer_index; - bool upper_pipe_exists = false; - struct fixed31_32 one = dc_fixpt_from_int(1); /** * Disable the cursor if there's another pipe above this with a @@ -3191,26 +3254,33 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) */ for (test_pipe = pipe_ctx->top_pipe; test_pipe; test_pipe = test_pipe->top_pipe) { - if (!test_pipe->plane_state->visible) + // Skip invisible layer and pipe-split plane on same layer + if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer) continue; - r2 = &test_pipe->plane_res.scl_data.recout; - r2_r = r2->x + r2->width; - r2_b = r2->y + r2->height; + r2 = test_pipe->plane_res.scl_data.recout; + r2_r = r2.x + r2.width; + r2_b = r2.y + r2.height; + split_pipe = test_pipe; - if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) - return true; + /** + * There is another half plane on same layer because of + * pipe-split, merge together per same height. 
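* Only the horizontal extent differs between the two halves, so the merge
* below widens r2 (x and width) while y and height stay shared.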
+ */ + for (split_pipe = pipe_ctx->top_pipe; split_pipe; + split_pipe = split_pipe->top_pipe) + if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { + r2_half = split_pipe->plane_res.scl_data.recout; + r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x; + r2.width = r2.width + r2_half.width; + r2_r = r2.x + r2.width; + break; + } - if (test_pipe->plane_state->layer_index < cur_layer) - upper_pipe_exists = true; + if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) + return true; } - // if plane scaled, assume an upper plane can handle cursor if it exists. - if (upper_pipe_exists && - (scl_data->ratios.horz.value != one.value || - scl_data->ratios.vert.value != one.value)) - return true; - return false; } @@ -3613,7 +3683,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, if (dc_is_dp_signal(pipe_ctx->stream->signal)) { if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) params.timing.pix_clk_100hz /= 2; - pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index e4701825b5a0..2dc4b4e4ba02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1460,5 +1460,14 @@ void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, if (enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; + if (enc->features.flags.bits.IS_UHBR10_CAPABLE) + max_link_cap.link_rate = LINK_RATE_UHBR10; + + if (enc->features.flags.bits.IS_UHBR13_5_CAPABLE) + max_link_cap.link_rate = LINK_RATE_UHBR13_5; + + if (enc->features.flags.bits.IS_UHBR20_CAPABLE) + max_link_cap.link_rate = LINK_RATE_UHBR20; + *link_settings = max_link_cap; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index d8b22618b79e..c337588231ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -118,6 +118,7 @@ struct dcn10_link_enc_registers { uint32_t RDPCSTX_PHY_CNTL4; uint32_t RDPCSTX_PHY_CNTL5; uint32_t RDPCSTX_PHY_CNTL6; + uint32_t RDPCSPIPE_PHY_CNTL6; uint32_t RDPCSTX_PHY_CNTL7; uint32_t RDPCSTX_PHY_CNTL8; uint32_t RDPCSTX_PHY_CNTL9; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 7daadb6a5233..f37551e00023 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1296,7 +1296,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( * in daisy chain use case */ j = i; - if (pool->stream_enc[i]->id == + if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index cf364ae93138..a97bdaa54f73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -29,6 +29,9 @@ #include "dcn10_stream_encoder.h" #include 
"reg_helper.h" #include "hw_shared.h" +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" +#include "dcn30/dcn30_afmt.h" #define DC_LOGGER \ enc1->base.ctx->logger @@ -726,6 +729,16 @@ void enc1_stream_encoder_update_dp_info_packets( 0, /* packetIndex */ &info_frame->vsc); + /* VSC SDP at packetIndex 1 is used by PSR in DMCUB FW. + * Note that the enablement of GSP1 is not done below, + * it's done in FW. + */ + if (info_frame->vsc.valid) + enc1_update_generic_info_packet( + enc1, + 1, /* packetIndex */ + &info_frame->vsc); + if (info_frame->spd.valid) enc1_update_generic_info_packet( enc1, @@ -884,6 +897,7 @@ void enc1_stream_encoder_stop_dp_info_packets( } void enc1_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -914,6 +928,8 @@ void enc1_stream_encoder_dp_blank( /* disable DP stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM); + /* the encoder stops sending the video stream * at the start of the vertical blanking. * Poll for DP_VID_STREAM_STATUS == 0 @@ -930,10 +946,13 @@ void enc1_stream_encoder_dp_blank( */ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET); } /* output video stream to link encoder */ void enc1_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) { @@ -1000,6 +1019,8 @@ void enc1_stream_encoder_dp_unblank( */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } void enc1_stream_encoder_set_avmute( @@ -1381,6 +1402,11 @@ static void enc1_se_disable_dp_audio( struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); uint32_t value = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (enc->afmt && enc->afmt->funcs->afmt_powerdown) + enc->afmt->funcs->afmt_powerdown(enc->afmt); +#endif + /* Disable Audio packets */ REG_UPDATE_5(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 0, @@ -1444,6 +1470,10 @@ void enc1_se_hdmi_audio_setup( void enc1_se_hdmi_audio_disable( struct stream_encoder *enc) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (enc->afmt && enc->afmt->funcs->afmt_powerdown) + enc->afmt->funcs->afmt_powerdown(enc->afmt); +#endif enc1_se_enable_audio_clock(enc, false); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 0d86df97878c..687d7e4bf7ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -627,9 +627,11 @@ void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); void enc1_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc); void enc1_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a47ba1d45be9..fc83744149d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -52,6 +52,9 @@ #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" #include "hw_sequencer.h" +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" +#include "inc/link_enc_cfg.h" #define 
DC_LOGGER_INIT(logger) @@ -2135,12 +2138,17 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; - if (dc_is_dp_signal(pipe_ctx->stream->signal)) { + if (is_dp_128b_132b_signal(pipe_ctx)) { + /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( + pipe_ctx->stream_res.hpo_dp_stream_enc, + pipe_ctx->stream_res.tg->inst); + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) params.timing.pix_clk_100hz /= 2; pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); - pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { @@ -2374,14 +2382,36 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) uint32_t active_total_with_borders; uint32_t early_control = 0; struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct link_encoder *link_enc; + + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream); + else + link_enc = link->link_enc; + ASSERT(link_enc); /* For MST, there are multiply stream go to only one link. * connect DIG back_end to front_end while enable_stream and * disconnect them during disable_stream * BY this, it is logic clean to separate stream and link */ - link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, - pipe_ctx->stream_res.stream_enc->id, true); + if (is_dp_128b_132b_signal(pipe_ctx)) { + setup_dp_hpo_stream(pipe_ctx, true); + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream( + pipe_ctx->stream_res.hpo_dp_stream_enc); + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link( + pipe_ctx->stream_res.hpo_dp_stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst, + link->hpo_dp_link_enc->inst); + } + + if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, pipe_ctx->stream_res.stream_enc->id, true); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { if (link->dc->hwss.program_dmdata_engine) @@ -2390,6 +2420,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.update_info_frame(pipe_ctx); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = timing->h_addressable @@ -2406,7 +2439,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) /* enable audio only within mode set */ if (pipe_ctx->stream_res.audio != NULL) { - if (dc_is_dp_signal(pipe_ctx->stream->signal)) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc); + else if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 
e3e01b17c164..3c388afa06dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -63,6 +63,7 @@ #include "dcn20_dccg.h" #include "dcn20_vmid.h" #include "dc_link_ddc.h" +#include "dc_link_dp.h" #include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" @@ -86,6 +87,7 @@ #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "vm_helper.h" +#include "link_enc_cfg.h" #include "amdgpu_socbb.h" @@ -1595,15 +1597,32 @@ static void get_pixel_clock_parameters( const struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; + struct dc_link *link = stream->link; + struct link_encoder *link_enc = NULL; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; - pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + + /* Links supporting dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc); + } else + link_enc = stream->link->link_enc; + ASSERT(link_enc); + + if (link_enc) + pixel_clk_params->encoder_object_id = link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ + /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; @@ -1854,7 +1873,9 @@ static void swizzle_to_dml_params( case DC_SW_VAR_D_X: *sw_mode = dm_sw_var_d_x; break; - + case DC_SW_VAR_R_X: + *sw_mode = dm_sw_var_r_x; + break; default: ASSERT(0); /* Not supported */ break; @@ -3044,6 +3065,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; + if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + return true; } return false; } @@ -3152,7 +3175,7 @@ void dcn20_calculate_dlg_params( context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].rq_regs, - pipes[pipe_idx].pipe); + &pipes[pipe_idx].pipe); pipe_idx++; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index e6307397e0d2..11c50b508754 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -29,6 +29,8 @@ #include "dcn20_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" #define DC_LOGGER \ enc1->base.ctx->logger @@ -290,7 +292,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc, static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps) + uint8_t *dsc_packed_pps, + bool immediate_update) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -444,6 +447,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) } void enc2_stream_encoder_dp_unblank( + struct dc_link *link, struct 
stream_encoder *enc, const struct encoder_unblank_param *param) { @@ -522,6 +526,8 @@ void enc2_stream_encoder_dp_unblank( */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } static void enc2_dp_set_odm_combine( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index f3d1a0237bda..baa1e539f341 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -104,6 +104,7 @@ void enc2_stream_encoder_dp_set_stream_attribute( uint32_t enable_sdp_splitting); void enc2_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c index fa981cd04dd0..95528e5ef89e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c @@ -44,11 +44,14 @@ afmt3->base.ctx -static void afmt3_setup_hdmi_audio( +void afmt3_setup_hdmi_audio( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); + if (afmt->funcs->afmt_poweron) + afmt->funcs->afmt_poweron(afmt); + /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); @@ -113,7 +116,7 @@ static union audio_cea_channels speakers_to_channels( return cea_channels; } -static void afmt3_se_audio_setup( +void afmt3_se_audio_setup( struct afmt *afmt, unsigned int az_inst, struct audio_info *audio_info) @@ -138,20 +141,24 @@ static void afmt3_se_audio_setup( REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels); /* Disable forced mem power off */ - REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0); + if (afmt->funcs->afmt_poweron == NULL) + REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0); } -static void afmt3_audio_mute_control( +void afmt3_audio_mute_control( struct afmt *afmt, bool mute) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); - + if (mute && afmt->funcs->afmt_powerdown) + afmt->funcs->afmt_powerdown(afmt); + if (!mute && afmt->funcs->afmt_poweron) + afmt->funcs->afmt_poweron(afmt); /* enable/disable transmission of audio packets */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute); } -static void afmt3_audio_info_immediate_update( +void afmt3_audio_info_immediate_update( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); @@ -160,11 +167,14 @@ static void afmt3_audio_info_immediate_update( REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); } -static void afmt3_setup_dp_audio( +void afmt3_setup_dp_audio( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); + if (afmt->funcs->afmt_poweron) + afmt->funcs->afmt_poweron(afmt); + /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h index 85d4619207e2..97e0cf62f98e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h @@ -121,6 +121,12 @@ struct afmt_funcs { void (*setup_dp_audio)( struct afmt *afmt); + + void (*afmt_poweron)( + struct afmt *afmt); + + void (*afmt_powerdown)( + struct afmt *afmt); }; struct afmt { @@ -136,6 +142,24 @@ struct dcn30_afmt 
{ const struct dcn30_afmt_mask *afmt_mask; }; +void afmt3_setup_hdmi_audio( + struct afmt *afmt); + +void afmt3_se_audio_setup( + struct afmt *afmt, + unsigned int az_inst, + struct audio_info *audio_info); + +void afmt3_audio_mute_control( + struct afmt *afmt, + bool mute); + +void afmt3_audio_info_immediate_update( + struct afmt *afmt); + +void afmt3_setup_dp_audio( + struct afmt *afmt); + void afmt3_construct(struct dcn30_afmt *afmt3, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 46ea39f5ef8d..6f3c2fb60790 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -192,6 +192,10 @@ void dcn30_link_encoder_construct( enc10->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE; + enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN; + enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN; + enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN; enc10->base.features.flags.bits.DP_IS_USB_C = bp_cap_info.DP_IS_USB_C; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 8487516819ef..3ea6dacec80c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -77,7 +77,8 @@ static void enc3_update_hdmi_info_packet( enc1->base.vpg->funcs->update_generic_info_packet( enc1->base.vpg, packet_index, - info_packet); + info_packet, + true); /* enable transmission of packet(s) - * packet transmission begins on the next frame */ @@ -335,7 +336,8 @@ static void enc3_dp_set_dsc_config(struct stream_encoder *enc, static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps) + uint8_t *dsc_packed_pps, + bool immediate_update) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -365,7 +367,8 @@ static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, enc1->base.vpg->funcs->update_generic_info_packet( enc1->base.vpg, 11 + i, - &pps_sdp); + &pps_sdp, + immediate_update); } /* SW should make sure VBID[6] update line number is bigger @@ -429,19 +432,22 @@ static void enc3_stream_encoder_update_dp_info_packets( enc->vpg->funcs->update_generic_info_packet( enc->vpg, 0, /* packetIndex */ - &info_frame->vsc); + &info_frame->vsc, + true); } if (info_frame->spd.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 2, /* packetIndex */ - &info_frame->spd); + &info_frame->spd, + true); } if (info_frame->hdrsmd.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 3, /* packetIndex */ - &info_frame->hdrsmd); + &info_frame->hdrsmd, + true); } /* packetIndex 4 is used for send immediate sdp message, and please * use other packetIndex (such as 5,6) for other info packet @@ -704,6 +710,8 @@ static void enc3_se_setup_dp_audio( static void enc3_se_dp_audio_enable( struct stream_encoder *enc) { + if (enc->afmt->funcs->afmt_poweron) + enc->afmt->funcs->afmt_poweron(enc->afmt); enc1_se_enable_audio_clock(enc, true); enc3_se_setup_dp_audio(enc); enc1_se_enable_dp_audio(enc); diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index f24612523248..eac08926b574 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -356,12 +356,6 @@ void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp, { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - /*Workaround until UMD fix the new dcc_ind_blk interface */ - if (dcc->independent_64b_blks && dcc->dcc_ind_blk == 0) - dcc->dcc_ind_blk = 1; - if (dcc->independent_64b_blks_c && dcc->dcc_ind_blk_c == 0) - dcc->dcc_ind_blk_c = 1; - REG_UPDATE_6(DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, dcc->enable, PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index fafed1e4a998..b132ebed09d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -559,7 +559,7 @@ void dcn30_init_hw(struct dc *dc) for (j = 0; j < dc->res_pool->stream_enc_count; j++) { if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank( + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], dc->res_pool->stream_enc[j]); break; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 3a5b53dd2f6d..93f32a312fee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .is_abm_supported = dcn21_is_abm_supported }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 089be7347591..5d9e6413d67a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -73,16 +73,23 @@ void optc3_lock_doublebuffer_enable(struct timing_generator *optc) OTG_H_BLANK_END, &h_blank_end); REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start, - MASTER_UPDATE_LOCK_DB_END_Y, v_blank_end); + MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1, + MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start); REG_UPDATE_2(OTG_GLOBAL_CONTROL4, - DIG_UPDATE_POSITION_X, 20, - DIG_UPDATE_POSITION_Y, v_blank_start); + DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1, + DIG_UPDATE_POSITION_Y, v_blank_start - 1); + // there is a DIG_UPDATE_VCOUNT_MODE and it is 0. 
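// (Editor's note: the rework narrows the double-buffer lock window to end
// at v_blank_start, parks the DIG update point just inside horizontal
// blank, and pairs both with the OTG_VUPDATE_KEEPOUT programming added
// at the end of this function.)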
+ REG_UPDATE_3(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1, - MASTER_UPDATE_LOCK_DB_END_X, h_blank_end, + MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180, MASTER_UPDATE_LOCK_DB_EN, 1); REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); } void optc3_lock_doublebuffer_disable(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index a0de309475a9..3a8a3214f770 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1164,8 +1164,12 @@ struct stream_encoder *dcn30_stream_encoder_create( vpg = dcn30_vpg_create(ctx, vpg_inst); afmt = dcn30_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, @@ -1856,7 +1860,7 @@ static struct pipe_ctx *dcn30_find_split_pipe( return pipe; } -static noinline bool dcn30_internal_validate_bw( +noinline bool dcn30_internal_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h index b754b89beadf..b92e4cc0232f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h @@ -55,6 +55,13 @@ unsigned int dcn30_calc_max_scaled_time( bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +bool dcn30_internal_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c index 8cfd181b4d5f..14bc44b1f886 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c @@ -43,10 +43,11 @@ vpg3->base.ctx -static void vpg3_update_generic_info_packet( +void vpg3_update_generic_info_packet( struct vpg *vpg, uint32_t packet_index, - const struct dc_info_packet *info_packet) + const struct dc_info_packet *info_packet, + bool immediate_update) { struct dcn30_vpg *vpg3 = DCN30_VPG_FROM_VPG(vpg); uint32_t i; @@ -106,69 +107,138 @@ static void vpg3_update_generic_info_packet( /* atomically update double-buffered GENERIC0 registers in immediate mode * (update at next block_update when block_update_lock == 0). 
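* With the new immediate_update argument, callers may instead arm the
* per-frame path (VPG_GSP_FRAME_UPDATE_CTRL), deferring the generic packet
* update to the next frame rather than applying it immediately.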
*/ - switch (packet_index) { - case 0: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC0_IMMEDIATE_UPDATE, 1); - break; - case 1: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC1_IMMEDIATE_UPDATE, 1); - break; - case 2: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC2_IMMEDIATE_UPDATE, 1); - break; - case 3: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC3_IMMEDIATE_UPDATE, 1); - break; - case 4: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC4_IMMEDIATE_UPDATE, 1); - break; - case 5: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC5_IMMEDIATE_UPDATE, 1); - break; - case 6: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC6_IMMEDIATE_UPDATE, 1); - break; - case 7: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC7_IMMEDIATE_UPDATE, 1); - break; - case 8: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC8_IMMEDIATE_UPDATE, 1); - break; - case 9: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC9_IMMEDIATE_UPDATE, 1); - break; - case 10: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC10_IMMEDIATE_UPDATE, 1); - break; - case 11: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC11_IMMEDIATE_UPDATE, 1); - break; - case 12: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC12_IMMEDIATE_UPDATE, 1); - break; - case 13: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC13_IMMEDIATE_UPDATE, 1); - break; - case 14: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC14_IMMEDIATE_UPDATE, 1); - break; - default: - break; + if (immediate_update) { + switch (packet_index) { + case 0: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC0_IMMEDIATE_UPDATE, 1); + break; + case 1: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC1_IMMEDIATE_UPDATE, 1); + break; + case 2: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC2_IMMEDIATE_UPDATE, 1); + break; + case 3: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC3_IMMEDIATE_UPDATE, 1); + break; + case 4: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC4_IMMEDIATE_UPDATE, 1); + break; + case 5: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC5_IMMEDIATE_UPDATE, 1); + break; + case 6: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC6_IMMEDIATE_UPDATE, 1); + break; + case 7: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC7_IMMEDIATE_UPDATE, 1); + break; + case 8: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC8_IMMEDIATE_UPDATE, 1); + break; + case 9: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC9_IMMEDIATE_UPDATE, 1); + break; + case 10: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC10_IMMEDIATE_UPDATE, 1); + break; + case 11: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC11_IMMEDIATE_UPDATE, 1); + break; + case 12: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC12_IMMEDIATE_UPDATE, 1); + break; + case 13: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC13_IMMEDIATE_UPDATE, 1); + break; + case 14: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC14_IMMEDIATE_UPDATE, 1); + break; + default: + break; + } + } else { + switch (packet_index) { + case 0: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC0_FRAME_UPDATE, 1); + break; + case 1: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC1_FRAME_UPDATE, 1); + break; + case 2: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC2_FRAME_UPDATE, 1); + break; + case 3: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC3_FRAME_UPDATE, 1); + break; + 
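/* Editor's note (hypothetical sketch, not part of the patch): because
 * REG_UPDATE() resolves its field argument by compile-time token pasting
 * into the per-instance shift/mask structs, packet_index cannot drive a
 * runtime lookup table here. The switch could only be compacted with a
 * pasting helper such as
 *
 *   #define VPG_GEN_FRAME_UPDATE(n) \
 *       REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, \
 *                  VPG_GENERIC##n##_FRAME_UPDATE, 1)
 *
 * used as "case 4: VPG_GEN_FRAME_UPDATE(4); break;" and so on.
 */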
case 4: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC4_FRAME_UPDATE, 1); + break; + case 5: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC5_FRAME_UPDATE, 1); + break; + case 6: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC6_FRAME_UPDATE, 1); + break; + case 7: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC7_FRAME_UPDATE, 1); + break; + case 8: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC8_FRAME_UPDATE, 1); + break; + case 9: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC9_FRAME_UPDATE, 1); + break; + case 10: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC10_FRAME_UPDATE, 1); + break; + case 11: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC11_FRAME_UPDATE, 1); + break; + case 12: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC12_FRAME_UPDATE, 1); + break; + case 13: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC13_FRAME_UPDATE, 1); + break; + case 14: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC14_FRAME_UPDATE, 1); + break; + + default: + break; + } + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h index 6161e9e66355..ed9a5549c389 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h @@ -138,7 +138,14 @@ struct vpg_funcs { void (*update_generic_info_packet)( struct vpg *vpg, uint32_t packet_index, - const struct dc_info_packet *info_packet); + const struct dc_info_packet *info_packet, + bool immediate_update); + + void (*vpg_poweron)( + struct vpg *vpg); + + void (*vpg_powerdown)( + struct vpg *vpg); }; struct vpg { @@ -154,6 +161,12 @@ struct dcn30_vpg { const struct dcn30_vpg_mask *vpg_mask; }; +void vpg3_update_generic_info_packet( + struct vpg *vpg, + uint32_t packet_index, + const struct dc_info_packet *info_packet, + bool immediate_update); + void vpg3_construct(struct dcn30_vpg *vpg3, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 912285fdce18..5350c93d7772 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1195,8 +1195,12 @@ struct stream_encoder *dcn301_stream_encoder_create( vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, @@ -1622,12 +1626,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } +static void calculate_wm_set_for_vlevel( + int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + 
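/* Editor's note: only dram_clock_change_latency_us is cached on entry and
 * restored after the get_wm_*() queries below, so this entry's latency
 * override does not leak into later watermark-set calculations; the sr_*
 * fields are simply overwritten again on each call.
 */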
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = 
dcn30_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 7d3ff5d44402..fcf96cf08c76 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -542,8 +542,12 @@ static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id vpg = dcn302_vpg_create(ctx, vpg_inst); afmt = dcn302_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -1462,7 +1466,7 @@ static const struct dccg_mask dccg_mask = { }; #define abm_regs(id)\ - [id] = { ABM_DCN301_REG_LIST(id) } + [id] = { ABM_DCN302_REG_LIST(id) } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index dd38796ba30a..2ce6eae7535d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1394,7 +1394,7 @@ static const struct dccg_mask dccg_mask = { }; #define abm_regs(id)\ - [id] = { ABM_DCN301_REG_LIST(id) } + [id] = { ABM_DCN302_REG_LIST(id) } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 4bab97acb155..d20e3b8ccc30 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -11,7 +11,9 @@ # Makefile for dcn31. DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o \ - dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o + dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ + dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ + dcn31_afmt.o dcn31_vpg.o ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c new file mode 100644 index 000000000000..d380a8ec2184 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "hw_shared.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn31_afmt.h" +#include "reg_helper.h" +#include "dc/dc.h" + +#define DC_LOGGER \ + afmt31->base.ctx->logger + +#define REG(reg)\ + (afmt31->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + afmt31->afmt_shift->field_name, afmt31->afmt_mask->field_name + + +#define CTX \ + afmt31->base.ctx + +static struct afmt_funcs dcn31_afmt_funcs = { + .setup_hdmi_audio = afmt3_setup_hdmi_audio, + .se_audio_setup = afmt3_se_audio_setup, + .audio_mute_control = afmt3_audio_mute_control, + .audio_info_immediate_update = afmt3_audio_info_immediate_update, + .setup_dp_audio = afmt3_setup_dp_audio, + .afmt_powerdown = afmt31_powerdown, + .afmt_poweron = afmt31_poweron +}; + +void afmt31_powerdown(struct afmt *afmt) +{ + struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt); + + if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false) + return; + + REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 0, AFMT_MEM_PWR_FORCE, 1); +} + +void afmt31_poweron(struct afmt *afmt) +{ + struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt); + + if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false) + return; + + REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 1, AFMT_MEM_PWR_FORCE, 0); +} + +void afmt31_construct(struct dcn31_afmt *afmt31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_afmt_registers *afmt_regs, + const struct dcn31_afmt_shift *afmt_shift, + const struct dcn31_afmt_mask *afmt_mask) +{ + afmt31->base.ctx = ctx; + + afmt31->base.inst = inst; + afmt31->base.funcs = &dcn31_afmt_funcs; + + afmt31->regs = afmt_regs; + afmt31->afmt_shift = afmt_shift; + afmt31->afmt_mask = afmt_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h new file mode 100644 index 000000000000..802cb05b6ab9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h @@ -0,0 +1,126 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_AFMT_H__ +#define __DAL_DCN31_AFMT_H__ + + +#define DCN31_AFMT_FROM_AFMT(afmt)\ + container_of(afmt, struct dcn31_afmt, base) + +#define AFMT_DCN31_REG_LIST(id) \ + SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \ + SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \ + SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \ + SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \ + SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \ + SRI(AFMT_60958_0, AFMT, id), \ + SRI(AFMT_60958_1, AFMT, id), \ + SRI(AFMT_60958_2, AFMT, id), \ + SRI(AFMT_MEM_PWR, AFMT, id) + +struct dcn31_afmt_registers { + uint32_t AFMT_INFOFRAME_CONTROL0; + uint32_t AFMT_VBI_PACKET_CONTROL; + uint32_t AFMT_AUDIO_PACKET_CONTROL; + uint32_t AFMT_AUDIO_PACKET_CONTROL2; + uint32_t AFMT_AUDIO_SRC_CONTROL; + uint32_t AFMT_60958_0; + uint32_t AFMT_60958_1; + uint32_t AFMT_60958_2; + uint32_t AFMT_MEM_PWR; +}; + +#define DCN31_AFMT_MASK_SH_LIST(mask_sh)\ + SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\ + SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh),\ + SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, mask_sh),\ + SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_STATE, mask_sh) + +#define AFMT_DCN31_REG_FIELD_LIST(type) \ + type AFMT_AUDIO_INFO_UPDATE;\ + type AFMT_AUDIO_SRC_SELECT;\ + type AFMT_AUDIO_CHANNEL_ENABLE;\ + type AFMT_60958_CS_UPDATE;\ + type AFMT_AUDIO_LAYOUT_OVRD;\ + type AFMT_60958_OSF_OVRD;\ + type AFMT_60958_CS_CHANNEL_NUMBER_L;\ + type AFMT_60958_CS_CLOCK_ACCURACY;\ + type AFMT_60958_CS_CHANNEL_NUMBER_R;\ + type AFMT_60958_CS_CHANNEL_NUMBER_2;\ + type AFMT_60958_CS_CHANNEL_NUMBER_3;\ + type AFMT_60958_CS_CHANNEL_NUMBER_4;\ + type AFMT_60958_CS_CHANNEL_NUMBER_5;\ + type AFMT_60958_CS_CHANNEL_NUMBER_6;\ + type AFMT_60958_CS_CHANNEL_NUMBER_7;\ + type AFMT_AUDIO_SAMPLE_SEND;\ + type AFMT_MEM_PWR_FORCE;\ + type AFMT_MEM_PWR_DIS;\ + type AFMT_MEM_PWR_STATE + +struct dcn31_afmt_shift { + AFMT_DCN31_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_afmt_mask { + AFMT_DCN31_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_afmt { + struct afmt base; + const struct dcn31_afmt_registers *regs; + const struct dcn31_afmt_shift *afmt_shift; + const struct 
dcn31_afmt_mask *afmt_mask; +}; + +void afmt31_poweron( + struct afmt *afmt); + +void afmt31_powerdown( + struct afmt *afmt); + +void afmt31_construct(struct dcn31_afmt *afmt31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_afmt_registers *afmt_regs, + const struct dcn31_afmt_shift *afmt_shift, + const struct dcn31_afmt_mask *afmt_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c new file mode 100644 index 000000000000..6bd7a0626665 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c @@ -0,0 +1,173 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dc_bios_types.h" +#include "hw_shared.h" +#include "dcn31_apg.h" +#include "reg_helper.h" + +#define DC_LOGGER \ + apg31->base.ctx->logger + +#define REG(reg)\ + (apg31->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + apg31->apg_shift->field_name, apg31->apg_mask->field_name + + +#define CTX \ + apg31->base.ctx + + +static void apg31_enable( + struct apg *apg) +{ + struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); + + /* Reset APG */ + REG_UPDATE(APG_CONTROL, APG_RESET, 1); + REG_WAIT(APG_CONTROL, + APG_RESET_DONE, 1, + 1, 10); + REG_UPDATE(APG_CONTROL, APG_RESET, 0); + REG_WAIT(APG_CONTROL, + APG_RESET_DONE, 0, + 1, 10); + + /* Enable APG */ + REG_UPDATE(APG_CONTROL2, APG_ENABLE, 1); +} + +static void apg31_disable( + struct apg *apg) +{ + struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); + + /* Disable APG */ + REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0); +} + +static union audio_cea_channels speakers_to_channels( + struct audio_speaker_flags speaker_flags) +{ + union audio_cea_channels cea_channels = {0}; + + /* these are one to one */ + cea_channels.channels.FL = speaker_flags.FL_FR; + cea_channels.channels.FR = speaker_flags.FL_FR; + cea_channels.channels.LFE = speaker_flags.LFE; + cea_channels.channels.FC = speaker_flags.FC; + + /* if Rear Left and Right exist move RC speaker to channel 7 + * otherwise to channel 5 + */ + if (speaker_flags.RL_RR) { + cea_channels.channels.RL_RC = speaker_flags.RL_RR; + cea_channels.channels.RR = speaker_flags.RL_RR; + cea_channels.channels.RC_RLC_FLC = speaker_flags.RC; + } else { + cea_channels.channels.RL_RC = speaker_flags.RC; + } + + /* FRONT Left Right Center and REAR Left Right Center are exclusive */ + if (speaker_flags.FLC_FRC) { + 
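/* the front LR-of-center pair claims the shared RC_RLC_FLC and RRC_FRC slots */ +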
cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC; + cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC; + } else { + cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC; + cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC; + } + + return cea_channels; +} + +static void apg31_se_audio_setup( + struct apg *apg, + unsigned int az_inst, + struct audio_info *audio_info) +{ + struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); + + uint32_t speakers = 0; + uint32_t channels = 0; + + ASSERT(audio_info); + /* This should not happen, but if it does, bail so we don't get a BSOD */ + if (audio_info == NULL) + return; + + speakers = audio_info->flags.info.ALLSPEAKERS; + channels = speakers_to_channels(audio_info->flags.speaker_flags).all; + + /* DisplayPort only allows for one audio stream with stream ID 0 */ + REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0); + + /* When running in "pair mode", pairs of audio channels have their own enable; + * this is for really old audio drivers */ + REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xF); + // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels); + + /* Disable forced mem power off */ + REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0); + + apg31_enable(apg); +} + +static void apg31_audio_mute_control( + struct apg *apg, + bool mute) +{ + if (mute) + apg31_disable(apg); + else + apg31_enable(apg); +} + +static struct apg_funcs dcn31_apg_funcs = { + .se_audio_setup = apg31_se_audio_setup, + .audio_mute_control = apg31_audio_mute_control, + .enable_apg = apg31_enable, + .disable_apg = apg31_disable, +}; + +void apg31_construct(struct dcn31_apg *apg31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_apg_registers *apg_regs, + const struct dcn31_apg_shift *apg_shift, + const struct dcn31_apg_mask *apg_mask) +{ + apg31->base.ctx = ctx; + + apg31->base.inst = inst; + apg31->base.funcs = &dcn31_apg_funcs; + + apg31->regs = apg_regs; + apg31->apg_shift = apg_shift; + apg31->apg_mask = apg_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h new file mode 100644 index 000000000000..24f568e120d8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h @@ -0,0 +1,115 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_AGP_H__ +#define __DAL_DCN31_AGP_H__ + + +#define DCN31_APG_FROM_APG(apg)\ + container_of(apg, struct dcn31_apg, base) + +#define APG_DCN31_REG_LIST(id) \ + SRI(APG_CONTROL, APG, id), \ + SRI(APG_CONTROL2, APG, id),\ + SRI(APG_MEM_PWR, APG, id),\ + SRI(APG_DBG_GEN_CONTROL, APG, id) + +struct dcn31_apg_registers { + uint32_t APG_CONTROL; + uint32_t APG_CONTROL2; + uint32_t APG_MEM_PWR; + uint32_t APG_DBG_GEN_CONTROL; +}; + + +#define DCN31_APG_MASK_SH_LIST(mask_sh)\ + SE_SF(APG0_APG_CONTROL, APG_RESET, mask_sh),\ + SE_SF(APG0_APG_CONTROL, APG_RESET_DONE, mask_sh),\ + SE_SF(APG0_APG_CONTROL2, APG_ENABLE, mask_sh),\ + SE_SF(APG0_APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, mask_sh),\ + SE_SF(APG0_APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, mask_sh),\ + SE_SF(APG0_APG_MEM_PWR, APG_MEM_PWR_FORCE, mask_sh) + +#define APG_DCN31_REG_FIELD_LIST(type) \ + type APG_RESET;\ + type APG_RESET_DONE;\ + type APG_ENABLE;\ + type APG_DP_AUDIO_STREAM_ID;\ + type APG_DBG_AUDIO_CHANNEL_ENABLE;\ + type APG_MEM_PWR_FORCE + +struct dcn31_apg_shift { + APG_DCN31_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_apg_mask { + APG_DCN31_REG_FIELD_LIST(uint32_t); +}; + +struct apg { + const struct apg_funcs *funcs; + struct dc_context *ctx; + int inst; +}; + +struct apg_funcs { + + void (*setup_hdmi_audio)( + struct apg *apg); + + void (*se_audio_setup)( + struct apg *apg, + unsigned int az_inst, + struct audio_info *audio_info); + + void (*audio_mute_control)( + struct apg *apg, + bool mute); + + void (*enable_apg)( + struct apg *apg); + + void (*disable_apg)( + struct apg *apg); +}; + + + +struct dcn31_apg { + struct apg base; + const struct dcn31_apg_registers *regs; + const struct dcn31_apg_shift *apg_shift; + const struct dcn31_apg_mask *apg_mask; +}; + +void apg31_construct(struct dcn31_apg *apg3, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_apg_registers *apg_regs, + const struct dcn31_apg_shift *apg_shift, + const struct dcn31_apg_mask *apg_mask); + + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 696c9307715d..9896adf67425 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -42,6 +42,155 @@ #define DC_LOGGER \ dccg->ctx->logger +void dccg31_set_dpstreamclk( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* enabled to select one of the DTBCLKs for pipe */ + switch (otg_inst) { + case 0: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE0_EN, (src == REFCLK) ? 0 : 1); + break; + case 1: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE1_EN, (src == REFCLK) ? 0 : 1); + break; + case 2: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE2_EN, (src == REFCLK) ? 0 : 1); + break; + case 3: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE3_EN, (src == REFCLK) ? 
0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_enable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst, + enum phyd32clk_clock_source phyd32clk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* select one of the PHYD32CLKs as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, phyd32clk, + SYMCLK32_SE0_EN, 1); + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, phyd32clk, + SYMCLK32_SE1_EN, 1); + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, phyd32clk, + SYMCLK32_SE2_EN, 1); + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, phyd32clk, + SYMCLK32_SE3_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, 0, + SYMCLK32_SE0_EN, 0); + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, 0, + SYMCLK32_SE1_EN, 0); + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, 0, + SYMCLK32_SE2_EN, 0); + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, 0, + SYMCLK32_SE3_EN, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_enable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst, + enum phyd32clk_clock_source phyd32clk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* select one of the PHYD32CLKs as the source for symclk32_le */ + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE0_SRC_SEL, phyd32clk, + SYMCLK32_LE0_EN, 1); + break; + case 1: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE1_SRC_SEL, phyd32clk, + SYMCLK32_LE1_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_disable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_le */ + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE0_SRC_SEL, 0, + SYMCLK32_LE0_EN, 0); + break; + case 1: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE1_SRC_SEL, 0, + SYMCLK32_LE1_EN, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, @@ -241,12 +390,25 @@ static void dccg31_set_dispclk_change_mode( void dccg31_init(struct dccg *dccg) { + /* Set HPO stream encoder to use refclk to avoid case where PHY is + * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which + * will cause DCN to hang. 
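+ * All four symclk32_se instances are therefore parked on refclk at init.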
+ */ + dccg31_disable_symclk32_se(dccg, 0); + dccg31_disable_symclk32_se(dccg, 1); + dccg31_disable_symclk32_se(dccg, 2); + dccg31_disable_symclk32_se(dccg, 3); } static const struct dccg_funcs dccg31_funcs = { .update_dpp_dto = dccg2_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg31_init, + .set_dpstreamclk = dccg31_set_dpstreamclk, + .enable_symclk32_se = dccg31_enable_symclk32_se, + .disable_symclk32_se = dccg31_disable_symclk32_se, + .enable_symclk32_le = dccg31_enable_symclk32_le, + .disable_symclk32_le = dccg31_disable_symclk32_le, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg31_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 706ad80ba873..1e5aabcb7799 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -130,6 +130,24 @@ struct dccg *dccg31_create( void dccg31_init(struct dccg *dccg); +void dccg31_enable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst, + enum phyd32clk_clock_source phyd32clk); + +void dccg31_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst); + +void dccg31_enable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst, + enum phyd32clk_clock_source phyd32clk); + +void dccg31_disable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst); + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 90127c1f9e35..4f0a0803db6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -37,6 +37,7 @@ #include "link_enc_cfg.h" #include "dc_dmub_srv.h" +#include "dal_asic_id.h" #define CTX \ enc10->base.ctx @@ -215,7 +216,7 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = { .fec_is_active = enc2_fec_is_active, .get_dig_frontend = dcn10_get_dig_frontend, .get_dig_mode = dcn10_get_dig_mode, - .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode, + .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode, .get_max_link_cap = dcn20_link_encoder_get_max_link_cap, .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux, }; @@ -320,6 +321,10 @@ void dcn31_link_encoder_construct( enc10->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE; + enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN; + enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN; + enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN; enc10->base.features.flags.bits.DP_IS_USB_C = bp_cap_info.DP_IS_USB_C; } else { @@ -363,7 +368,7 @@ void dcn31_link_encoder_enable_dp_output( enum clock_source_id clock_source) { /* Enable transmitter and encoder. */ - if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) { + if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source); @@ -379,7 +384,7 @@ void dcn31_link_encoder_enable_dp_mst_output( enum clock_source_id clock_source) { /* Enable transmitter and encoder. 
*/ - if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) { + if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); @@ -394,7 +399,7 @@ void dcn31_link_encoder_disable_output( enum signal_type signal) { /* Disable transmitter and encoder. */ - if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) { + if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { dcn10_link_encoder_disable_output(enc, signal); @@ -404,3 +409,33 @@ void dcn31_link_encoder_disable_output( } } +bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t dp_alt_mode_disable; + bool is_usb_c_alt_mode = false; + + if (enc->features.flags.bits.DP_IS_USB_C) { + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + // [Note] no need to check hw_internal_rev once phy mux selection is ready + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + } else { + /* + * B0 phys use a new set of registers to check whether alt mode is disabled. + * if value == 1 alt mode is disabled, otherwise it is enabled. + */ + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) + || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) + || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + } else { + // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + } + } + + is_usb_c_alt_mode = (dp_alt_mode_disable == 0); + } + + return is_usb_c_alt_mode; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h index 32d146312838..bec50e4402ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h @@ -69,6 +69,7 @@ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \ + SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \ @@ -115,7 +116,9 @@ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ - LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\ @@ -243,4 +246,10 @@ void dcn31_link_encoder_disable_output( struct link_encoder *enc, enum signal_type signal); +/* + * Check whether USB-C DP Alt mode is disabled + */ +bool dcn31_link_encoder_is_in_alt_mode( + struct link_encoder *enc); + #endif /* __DC_LINK_ENCODER__DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c new 
file mode 100644 index 000000000000..6c08e21bb708 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -0,0 +1,616 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn31_hpo_dp_link_encoder.h" +#include "reg_helper.h" +#include "dc_link.h" +#include "stream_encoder.h" + +#define DC_LOGGER \ + enc3->base.ctx->logger + +#define REG(reg)\ + (enc3->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name + + +#define CTX \ + enc3->base.ctx + +enum { + DP_SAT_UPDATE_MAX_RETRY = 200 +}; + +void dcn31_hpo_dp_link_enc_enable( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t dp_link_enabled; + + /* get current status of link enabled */ + REG_GET(DP_DPHY_SYM32_STATUS, + STATUS, &dp_link_enabled); + + /* Enable clocks first */ + REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1); + + /* Reset DPHY. Only reset if going from disable to enable */ + if (!dp_link_enabled) { + REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0); + } + + /* Configure DPHY settings */ + REG_UPDATE_3(DP_DPHY_SYM32_CONTROL, + DPHY_ENABLE, 1, + PRECODER_ENABLE, 1, + NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 
1 : 3); +} + +void dcn31_hpo_dp_link_enc_disable( + struct hpo_dp_link_encoder *enc) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + + /* Configure DPHY settings */ + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + DPHY_ENABLE, 0); + + /* Shut down clock last */ + REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0); +} + +void dcn31_hpo_dp_link_enc_set_link_test_pattern( + struct hpo_dp_link_encoder *enc, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t tp_custom; + + switch (tp_params->dp_phy_pattern) { + case DP_TEST_PATTERN_VIDEO_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_ACTIVE); + break; + case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_TRAINING_TPS1); + break; + case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_TRAINING_TPS2); + break; + case DP_TEST_PATTERN_128b_132b_TPS1: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT1, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT2, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT3, DP_DPHY_TP_SELECT_TPS1); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT1, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT2, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT3, DP_DPHY_TP_SELECT_TPS2); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS7: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS7); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS9: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS9); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS11: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS11); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS15: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS15); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case 
DP_TEST_PATTERN_PRBS23: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS23); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS31: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS31); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom); + + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM); + + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_SQUARE_PULSE: + REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0, + TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]); + + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE, + 
TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE); + + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + default: + break; + } +} + +static void fill_stream_allocation_row_info( + const struct link_mst_stream_allocation *stream_allocation, + uint32_t *src, + uint32_t *slots) +{ + const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc; + + if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) { + *src = stream_enc->id - ENGINE_ID_HPO_DP_0; + *slots = stream_allocation->slot_count; + } else { + *src = 0; + *slots = 0; + } +} + +/* programs DP VC payload allocation */ +void dcn31_hpo_dp_link_enc_update_stream_allocation_table( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t slots = 0; + uint32_t src = 0; + + /* --- Set MSE Stream Attribute - + * Setup VC Payload Table on Tx Side, + * Issue allocation change trigger + * to commit payload on both tx and rx side + */ + + /* we should clean-up table each time */ + + if (table->stream_count >= 1) { + fill_stream_allocation_row_info( + &table->stream_allocations[0], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 2) { + fill_stream_allocation_row_info( + &table->stream_allocations[1], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 3) { + fill_stream_allocation_row_info( + &table->stream_allocations[2], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 4) { + fill_stream_allocation_row_info( + &table->stream_allocations[3], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + /* --- wait for transaction finish */ + + /* send allocation change trigger (ACT) + * this step first sends the ACT, + * then double buffers the SAT into the hardware + * making the new allocation active on the DP MST mode link + */ + + /* SAT_UPDATE: + * 0 - No Action + * 1 - Update SAT with trigger + * 2 - Update SAT without trigger + */ + REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE, + SAT_UPDATE, 1); + + /* wait for update to complete + * (i.e. SAT_UPDATE_PENDING field is set to 0) + * No need for HW to enforce keepout. 
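+ * SAT_UPDATE_PENDING clears once the double-buffered table takes effect on the link.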
+ */ + /* Best case and worst case wait time for SAT_UPDATE_PENDING + * best: 109 us + * worst: 868 us + */ + REG_WAIT(DP_DPHY_SYM32_STATUS, + SAT_UPDATE_PENDING, 0, + 10, DP_SAT_UPDATE_MAX_RETRY); +} + +void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t x = dc_fixpt_floor( + avg_time_slots_per_mtp); + uint32_t y = dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + x), + 25)); + + switch (stream_encoder_inst) { + case 0: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 1: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 2: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 3: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + default: + ASSERT(0); + } + + /* Best case and worst case wait time for RATE_UPDATE_PENDING + * best: 116 ns + * worst: 903 ns + */ + /* wait for update to be completed on the link */ + REG_WAIT(DP_DPHY_SYM32_STATUS, + RATE_UPDATE_PENDING, 0, + 1, 10); +} + +static bool dcn31_hpo_dp_link_enc_is_in_alt_mode( + struct hpo_dp_link_encoder *enc) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t dp_alt_mode_disable = 0; + + ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E)); + + /* if value == 1 alt mode is disabled, otherwise it is enabled */ + REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + return (dp_alt_mode_disable == 0); +} + +void dcn31_hpo_dp_link_enc_read_state( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + + ASSERT(state); + + REG_GET(DP_DPHY_SYM32_STATUS, + STATUS, &state->link_enc_enabled); + REG_GET(DP_DPHY_SYM32_CONTROL, + NUM_LANES, &state->lane_count); + REG_GET(DP_DPHY_SYM32_CONTROL, + MODE, (uint32_t *)&state->link_mode); + + REG_GET_2(DP_DPHY_SYM32_SAT_VC0, + SAT_STREAM_SOURCE, &state->stream_src[0], + SAT_SLOT_COUNT, &state->slot_count[0]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC1, + SAT_STREAM_SOURCE, &state->stream_src[1], + SAT_SLOT_COUNT, &state->slot_count[1]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC2, + SAT_STREAM_SOURCE, &state->stream_src[2], + SAT_SLOT_COUNT, &state->slot_count[2]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC3, + SAT_STREAM_SOURCE, &state->stream_src[3], + SAT_SLOT_COUNT, &state->slot_count[3]); + + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, + STREAM_VC_RATE_X, &state->vc_rate_x[0], + STREAM_VC_RATE_Y, &state->vc_rate_y[0]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, + STREAM_VC_RATE_X, &state->vc_rate_x[1], + STREAM_VC_RATE_Y, &state->vc_rate_y[1]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, + STREAM_VC_RATE_X, &state->vc_rate_x[2], + STREAM_VC_RATE_Y, &state->vc_rate_y[2]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, + STREAM_VC_RATE_X, &state->vc_rate_x[3], + STREAM_VC_RATE_Y, &state->vc_rate_y[3]); +} + +static enum bp_result link_transmitter_control( + struct dcn31_hpo_dp_link_encoder *enc3, + struct bp_transmitter_control *cntl) +{ + enum bp_result result; + struct dc_bios *bp = enc3->base.ctx->dc_bios; + + result = 
bp->funcs->transmitter_control(bp, cntl); + + return result; +} + +/* enables DP PHY output for 128b132b encoding */ +void dcn31_hpo_dp_link_enc_enable_dp_output( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Set the transmitter */ + enc3->base.transmitter = transmitter; + + /* Enable the PHY */ + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = ENGINE_ID_UNKNOWN; + cntl.transmitter = enc3->base.transmitter; + //cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; + cntl.lanes_number = link_settings->lane_count; + cntl.hpd_sel = enc3->base.hpd_source; + cntl.pixel_clock = link_settings->link_rate * 1000; + cntl.color_depth = COLOR_DEPTH_UNDEFINED; + cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0; + + result = link_transmitter_control(enc3, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + +void dcn31_hpo_dp_link_enc_disable_output( + struct hpo_dp_link_encoder *enc, + enum signal_type signal) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* disable transmitter */ + cntl.action = TRANSMITTER_CONTROL_DISABLE; + cntl.transmitter = enc3->base.transmitter; + cntl.hpd_sel = enc3->base.hpd_source; + cntl.signal = signal; + + result = link_transmitter_control(enc3, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } + + /* disable encoder */ + dcn31_hpo_dp_link_enc_disable(enc); +} + +void dcn31_hpo_dp_link_enc_set_ffe( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* disable transmitter */ + cntl.transmitter = enc3->base.transmitter; + cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; + cntl.lanes_number = link_settings->lane_count; + cntl.pixel_clock = link_settings->link_rate * 1000; + cntl.lane_settings = ffe_preset; + + result = link_transmitter_control(enc3, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } +} + +static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = { + .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output, + .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output, + .link_enable = dcn31_hpo_dp_link_enc_enable, + .link_disable = dcn31_hpo_dp_link_enc_disable, + .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern, + .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table, + .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size, + .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode, + .read_state = dcn31_hpo_dp_link_enc_read_state, + .set_ffe = dcn31_hpo_dp_link_enc_set_ffe, +}; + +void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, + struct dc_context *ctx, + uint32_t inst, + const 
struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask) +{ + enc31->base.ctx = ctx; + + enc31->base.inst = inst; + enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs; + enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN; + enc31->base.transmitter = TRANSMITTER_UNKNOWN; + + enc31->regs = hpo_le_regs; + enc31->hpo_le_shift = hpo_le_shift; + enc31->hpo_le_mask = hpo_le_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h new file mode 100644 index 000000000000..0706ccaf6fec --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h @@ -0,0 +1,222 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ +#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ + +#include "link_encoder.h" + + +#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\ + container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base) + + +#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \ + SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \ + SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) + +#define DCN3_1_RDPCSTX_REG_LIST(id) \ + SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id) + + +#define DCN3_1_HPO_DP_LINK_ENC_REGS \ + uint32_t DP_LINK_ENC_CLOCK_CONTROL;\ + uint32_t DP_DPHY_SYM32_CONTROL;\ + uint32_t DP_DPHY_SYM32_STATUS;\ + uint32_t DP_DPHY_SYM32_TP_CONFIG;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\ + uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\ + uint32_t DP_DPHY_SYM32_SAT_VC0;\ + uint32_t DP_DPHY_SYM32_SAT_VC1;\ + uint32_t DP_DPHY_SYM32_SAT_VC2;\ + uint32_t DP_DPHY_SYM32_SAT_VC3;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\ + uint32_t DP_DPHY_SYM32_SAT_UPDATE + +struct dcn31_hpo_dp_link_encoder_registers { + DCN3_1_HPO_DP_LINK_ENC_REGS; + uint32_t RDPCSTX_PHY_CNTL6[5]; +}; + +#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\ + SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\ + 
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\ + SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh) + +#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \ + type DP_LINK_ENC_CLOCK_EN;\ + type DPHY_RESET;\ + type DPHY_ENABLE;\ + type PRECODER_ENABLE;\ + type NUM_LANES;\ + type MODE;\ + type STATUS;\ + type SAT_UPDATE_PENDING;\ + type RATE_UPDATE_PENDING;\ + type TP_CUSTOM;\ + type TP_SELECT0;\ + type TP_SELECT1;\ + type TP_SELECT2;\ + type TP_SELECT3;\ + type TP_PRBS_SEL0;\ + type TP_PRBS_SEL1;\ + type TP_PRBS_SEL2;\ + type TP_PRBS_SEL3;\ + type TP_SQ_PULSE_WIDTH;\ + type SAT_STREAM_SOURCE;\ + type SAT_SLOT_COUNT;\ + type STREAM_VC_RATE_X;\ + type STREAM_VC_RATE_Y;\ + type SAT_UPDATE;\ + type RDPCS_PHY_DPALT_DISABLE + + +struct dcn31_hpo_dp_link_encoder_shift { + DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_hpo_dp_link_encoder_mask { + DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_hpo_dp_link_encoder { + struct hpo_dp_link_encoder base; + const struct dcn31_hpo_dp_link_encoder_registers *regs; + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift; + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask; +}; + +void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask); + +void dcn31_hpo_dp_link_enc_enable_dp_output( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter); + +void dcn31_hpo_dp_link_enc_disable_output( + struct hpo_dp_link_encoder *enc, + enum signal_type signal); + +void dcn31_hpo_dp_link_enc_enable( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes); + +void dcn31_hpo_dp_link_enc_disable( + struct hpo_dp_link_encoder *enc); + +void dcn31_hpo_dp_link_enc_set_link_test_pattern( + struct hpo_dp_link_encoder *enc, + struct 
encoder_set_dp_phy_pattern_param *tp_params); + +void dcn31_hpo_dp_link_enc_update_stream_allocation_table( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table); + +void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp); + +void dcn31_hpo_dp_link_enc_read_state( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state); + +void dcn31_hpo_dp_link_enc_set_ffe( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset); + +#endif // __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c new file mode 100644 index 000000000000..565f12dd179a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -0,0 +1,752 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn31_hpo_dp_stream_encoder.h" +#include "reg_helper.h" +#include "dc_link.h" + +#define DC_LOGGER \ + enc3->base.ctx->logger + +#define REG(reg)\ + (enc3->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name + +#define CTX \ + enc3->base.ctx + + +enum dp2_pixel_encoding { + DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444, + DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422, + DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420, + DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY +}; + +enum dp2_uncompressed_component_depth { + DP_SYM32_ENC_COMPONENT_DEPTH_6BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_8BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_10BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_12BPC +}; + + +static void dcn31_hpo_dp_stream_enc_enable_stream( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Enable all clocks in the DP_STREAM_ENC */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, + DP_STREAM_ENC_CLOCK_EN, 1); + + /* Assert reset to the DP_SYM32_ENC logic */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET, 1); + /* Wait for reset to complete (to assert) */ + REG_WAIT(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET_DONE, 1, + 1, 10); + + /* De-assert reset to the DP_SYM32_ENC logic */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET, 0); + /* Wait for reset to de-assert */ + REG_WAIT(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET_DONE, 0, + 1, 10); + + /* Enable idle pattern generation */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_dp_unblank( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_source) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Set the input mux for video stream source */ + REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL, + DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source); + + /* Enable video transmission in main framer */ + REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_ENABLE, 1); + + /* Reset and Enable Pixel to Symbol FIFO */ + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET, 1); + REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1, + 1, 10); + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET, 0); + REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0, + 1, 10); + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_ENABLE, 1); + + /* Reset and Enable Clock Ramp Adjuster FIFO */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET, 1); + REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET_DONE, 1, + 1, 10); + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET, 0); + REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET_DONE, 0, + 1, 10); + + /* For Debug -- Enable CRC */ + REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL, + CRC_ENABLE, 1, + CRC_CONT_MODE_ENABLE, 1); + + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_dp_blank( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable video transmission */ + REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_ENABLE, 0); + + /* Wait for video stream transmission disabled + * Larger delay to wait until VBLANK - use max retry of + * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + + * a little more because we may not trust delay accuracy. + */ + //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL, + // VID_STREAM_STATUS, 0, + // 10, 5000); + + /* Disable SDP transmission */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); + + /* Disable Pixel to Symbol FIFO */ + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_ENABLE, 0); + + /* Disable Clock Ramp Adjuster FIFO */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_ENABLE, 0); +} + +static void dcn31_hpo_dp_stream_enc_disable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable DP_SYM32_ENC */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, 0); + + /* Disable clocks in the DP_STREAM_ENC */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, + DP_STREAM_ENC_CLOCK_EN, 0); +} + +static void dcn31_hpo_dp_stream_enc_set_stream_attribute( + struct hpo_dp_stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, + bool compressed_format, + bool double_buffer_en) +{ + enum dp2_pixel_encoding pixel_encoding; + enum dp2_uncompressed_component_depth component_depth; + uint32_t h_active_start; + uint32_t v_active_start; + uint32_t h_blank; + uint32_t h_back_porch; + uint32_t h_width; + uint32_t v_height; + unsigned long long v_freq; + uint8_t misc0 = 0; + uint8_t misc1 = 0; + uint8_t hsp; + uint8_t vsp; + + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + struct dc_crtc_timing hw_crtc_timing = *crtc_timing; + + /* MISC0[0] = 0 video and link clocks are asynchronous + * MISC1[0] = 0 interlace not supported + * MISC1[2:1] = 0 stereo field is handled by hardware + * MISC1[5:3] = 0 Reserved + */ + + /* Interlaced not supported */ + if (hw_crtc_timing.flags.INTERLACE) { + BREAK_TO_DEBUGGER(); + } + + /* Double buffer enable for MSA and pixel format registers + * Only double buffer for changing stream attributes for active streams + * Do not double buffer when initially enabling a stream + */ + REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, + MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en); + REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, + PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en); + + /* Pixel Encoding */ + switch (hw_crtc_timing.pixel_encoding) { + case PIXEL_ENCODING_YCBCR422: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422; + misc0 = misc0 | 0x2; // MISC0[2:1] = 01 + break; + case PIXEL_ENCODING_YCBCR444: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; + misc0 = misc0 | 0x4; // MISC0[2:1] = 10 + + if (hw_crtc_timing.flags.Y_ONLY) { + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY; + if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) { + /* HW testing only, no use case yet.
+ * Color depth of Y-only could be + * 8, 10, 12, 16 bits + */ + misc1 = misc1 | 0x80; // MISC1[7] = 1 + } + } + break; + case PIXEL_ENCODING_YCBCR420: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420; + misc1 = misc1 | 0x40; // MISC1[6] = 1 + break; + case PIXEL_ENCODING_RGB: + default: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; + break; + } + + /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used. + * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the + * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, + * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). + */ + if (use_vsc_sdp_for_colorimetry) + misc1 = misc1 | 0x40; + else + misc1 = misc1 & ~0x40; + + /* Color depth */ + switch (hw_crtc_timing.display_color_depth) { + case COLOR_DEPTH_666: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; + // MISC0[7:5] = 000 + break; + case COLOR_DEPTH_888: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC; + misc0 = misc0 | 0x20; // MISC0[7:5] = 001 + break; + case COLOR_DEPTH_101010: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC; + misc0 = misc0 | 0x40; // MISC0[7:5] = 010 + break; + case COLOR_DEPTH_121212: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC; + misc0 = misc0 | 0x60; // MISC0[7:5] = 011 + break; + default: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; + break; + } + + REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, + PIXEL_ENCODING_TYPE, compressed_format, + UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding, + UNCOMPRESSED_COMPONENT_DEPTH, component_depth); + + switch (output_color_space) { + case COLOR_SPACE_SRGB: + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + break; + case COLOR_SPACE_SRGB_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_XR_RGB: + case COLOR_SPACE_MSREF_SCRGB: + case COLOR_SPACE_ADOBERGB: + case COLOR_SPACE_DCIP3: + case COLOR_SPACE_XV_YCC_709: + case COLOR_SPACE_XV_YCC_601: + case COLOR_SPACE_DISPLAYNATIVE: + case COLOR_SPACE_DOLBYVISION: + case COLOR_SPACE_APPCTRL: + case COLOR_SPACE_CUSTOMPOINTS: + case COLOR_SPACE_UNKNOWN: + case COLOR_SPACE_YCBCR709_BLACK: + /* do nothing */ + break; + } + + /* calculate from vesa timing parameters + * h_active_start related to leading edge of sync + */ + h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - + hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; + + h_back_porch = h_blank - hw_crtc_timing.h_front_porch - + hw_crtc_timing.h_sync_width; + + /* start at beginning of left border */ + h_active_start = hw_crtc_timing.h_sync_width + 
h_back_porch; + + v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - + hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - + hw_crtc_timing.v_front_porch; + + h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; + v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; + hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0; + vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0; + v_freq = hw_crtc_timing.pix_clk_100hz * 100; + + /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 + * + * Lane 0 Lane 1 Lane 2 Lane 3 + * MSA[0] = { 0, 0, 0, VFREQ[47:40]} + * MSA[1] = { 0, 0, 0, VFREQ[39:32]} + * MSA[2] = { 0, 0, 0, VFREQ[31:24]} + * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]} + * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]} + * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]} + * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]} + * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]} + * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0} + */ + REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, v_freq >> 40); + + REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, (v_freq >> 32) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, (v_freq >> 24) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8, + MSA_DATA_LANE_1, h_active_start >> 8, + MSA_DATA_LANE_2, h_width >> 8, + MSA_DATA_LANE_3, (v_freq >> 16) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff, + MSA_DATA_LANE_1, h_active_start & 0xff, + MSA_DATA_LANE_2, h_width & 0xff, + MSA_DATA_LANE_3, (v_freq >> 8) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0, + MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8, + MSA_DATA_LANE_1, v_active_start >> 8, + MSA_DATA_LANE_2, v_height >> 8, + MSA_DATA_LANE_3, v_freq & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0, + MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff, + MSA_DATA_LANE_1, v_active_start & 0xff, + MSA_DATA_LANE_2, v_height & 0xff, + MSA_DATA_LANE_3, misc0); + + REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0, + MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8), + MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8), + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, misc1); + + REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff, + MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, 0); +} + +static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( + struct hpo_dp_stream_encoder *enc, + const struct encoder_info_frame *info_frame) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t dmdata_packet_enabled = 0; + bool sdp_stream_enable = false; + + if (info_frame->vsc.valid) { + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 0, /* packetIndex */ + &info_frame->vsc, + true); + sdp_stream_enable = true; + } + if (info_frame->spd.valid) { + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 2, /* packetIndex */ + &info_frame->spd, + true); + sdp_stream_enable = true; + } + if (info_frame->hdrsmd.valid) { + 
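+ /* HDR static metadata is carried as one more VPG generic packet: slot 3 below, with VSC in slot 0 and SPD in slot 2 above (generic packet slot 1 is left unused by this function) */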
enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 3, /* packetIndex */ + &info_frame->hdrsmd, + true); + sdp_stream_enable = true; + } + /* enable/disable transmission of packet(s). + * If enabled, packet transmission begins on the next frame + */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); + + /* check if dynamic metadata packet transmission is enabled */ + REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, + METADATA_PACKET_ENABLE, &dmdata_packet_enabled); + + /* Enable secondary data path */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets( + struct hpo_dp_stream_encoder *enc) +{ + /* stop generic packets on DP */ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t asp_enable = 0; + uint32_t atp_enable = 0; + uint32_t aip_enable = 0; + uint32_t acm_enable = 0; + + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + + /* Disable secondary data path if audio is also disabled */ + REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ASP_ENABLE, &asp_enable, + ATP_ENABLE, &atp_enable, + AIP_ENABLE, &aip_enable, + ACM_ENABLE, &acm_enable); + if (!(asp_enable || atp_enable || aip_enable || acm_enable)) + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); +} + +static uint32_t hpo_dp_is_gsp_enabled( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t gsp0_enabled = 0; + uint32_t gsp2_enabled = 0; + uint32_t gsp3_enabled = 0; + uint32_t gsp11_enabled = 0; + + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled); + + return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled); +} + +static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet( + struct hpo_dp_stream_encoder *enc, + bool enable, + uint8_t *dsc_packed_pps, + bool immediate_update) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + if (enable) { + struct dc_info_packet pps_sdp; + int i; + + /* Configure for PPS packet size (128 bytes) */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_PAYLOAD_SIZE, 3); + + /* Load PPS into infoframe (SDP) registers */ + pps_sdp.valid = true; + pps_sdp.hb0 = 0; + pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS; + pps_sdp.hb2 = 127; + pps_sdp.hb3 = 0; + + for (i = 0; i < 4; i++) { + memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32); + enc3->base.vpg->funcs->update_generic_info_packet( + enc3->base.vpg, + 11 + i, + &pps_sdp, + immediate_update); + } + + /* SW should make sure VBID[6] update line number is bigger + * than PPS transmit line number + */ + 
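+ /* With the values programmed below, the PPS GSP is transmitted on line 2 while the VBID[6] flag is updated on line 3, satisfying the ordering requirement above */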
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_TRANSMISSION_LINE_NUMBER, 2); + + REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL, + VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0, + VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3); + + /* Send PPS data at the line number specified above. */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1); + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); + } else { + /* Disable Generic Stream Packet 11 (GSP) transmission */ + REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0, + GSP_PAYLOAD_SIZE, 0); + } +} + +static void dcn31_hpo_dp_stream_enc_map_stream_to_link( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_enc_inst, + uint32_t link_enc_inst) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + ASSERT(stream_enc_inst < 4 && link_enc_inst < 2); + + switch (stream_enc_inst) { + case 0: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL0, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 1: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL1, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 2: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL2, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 3: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL3, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + } +} + +static void dcn31_hpo_dp_stream_enc_mute_control( + struct hpo_dp_stream_encoder *enc, + bool mute) +{ + ASSERT(enc->apg); + enc->apg->funcs->audio_mute_control(enc->apg, mute); +} + +static void dcn31_hpo_dp_stream_enc_audio_setup( + struct hpo_dp_stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Set the input mux for audio stream source */ + REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL, + DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst); + + ASSERT(enc->apg); + enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info); +} + +static void dcn31_hpo_dp_stream_enc_audio_enable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Enable Audio packets */ + REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1); + + /* Program the ATP and AIP next */ + REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ATP_ENABLE, 1, + AIP_ENABLE, 1); + + /* Enable secondary data path */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); + + /* Enable APG block */ + enc->apg->funcs->enable_apg(enc->apg); +} + +static void dcn31_hpo_dp_stream_enc_audio_disable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable Audio packets */ + REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ASP_ENABLE, 0, + ATP_ENABLE, 0, + AIP_ENABLE, 0, + ACM_ENABLE, 0); + + /* Disable SDP Stream Enable if other SDP GSP are also disabled */ + if (!(hpo_dp_is_gsp_enabled(enc))) + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); + + /* Disable APG block */ + enc->apg->funcs->disable_apg(enc->apg); +} + +static void dcn31_hpo_dp_stream_enc_read_state( + struct hpo_dp_stream_encoder *enc, + struct hpo_dp_stream_encoder_state *s) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + REG_GET(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled); + REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, &s->vid_stream_enabled); + REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL, + DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst); + + REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, + PIXEL_ENCODING_TYPE, &s->compressed_format, + UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding, + UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth); + + REG_GET(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, &s->sdp_enabled); + + switch (enc->inst) { + case 0: + REG_GET(DP_STREAM_MAPPER_CONTROL0, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 1: + REG_GET(DP_STREAM_MAPPER_CONTROL1, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 2: + REG_GET(DP_STREAM_MAPPER_CONTROL2, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 3: + REG_GET(DP_STREAM_MAPPER_CONTROL3, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + } +} + +static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { + .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, + .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, + .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank, + .disable = dcn31_hpo_dp_stream_enc_disable, + .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, + .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets, + .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets, + .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet, + .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link, + .audio_mute_control = dcn31_hpo_dp_stream_enc_mute_control, + .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup, + .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, + .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, + .read_state = dcn31_hpo_dp_stream_enc_read_state, +}; + +void dcn31_hpo_dp_stream_encoder_construct( + struct dcn31_hpo_dp_stream_encoder *enc3, + struct dc_context *ctx, + struct dc_bios *bp, + uint32_t inst, + enum engine_id eng_id, + struct vpg *vpg, + struct apg *apg, + const struct dcn31_hpo_dp_stream_encoder_registers *regs, + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask) +{ + enc3->base.funcs = &dcn30_str_enc_funcs; + enc3->base.ctx = ctx; + enc3->base.inst = inst; + enc3->base.id = eng_id; + enc3->base.bp = bp; + enc3->base.vpg = vpg; + enc3->base.apg = apg; + enc3->regs = regs; + enc3->hpo_se_shift = hpo_se_shift; + enc3->hpo_se_mask = hpo_se_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h new file mode 100644 index 000000000000..70b94fc25304 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h @@ -0,0 +1,241 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ +#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ + +#include "dcn30/dcn30_vpg.h" +#include "dcn31/dcn31_apg.h" +#include "stream_encoder.h" + + +#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\ + container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base) + + +/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */ +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L + + +#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \ + SR(DP_STREAM_MAPPER_CONTROL0),\ + SR(DP_STREAM_MAPPER_CONTROL1),\ + SR(DP_STREAM_MAPPER_CONTROL2),\ + SR(DP_STREAM_MAPPER_CONTROL3),\ + SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\ + SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id) + +#define DCN3_1_HPO_DP_STREAM_ENC_REGS \ + uint32_t DP_STREAM_MAPPER_CONTROL0;\ + uint32_t DP_STREAM_MAPPER_CONTROL1;\ + uint32_t DP_STREAM_MAPPER_CONTROL2;\ + uint32_t DP_STREAM_MAPPER_CONTROL3;\ + uint32_t 
DP_STREAM_ENC_CLOCK_CONTROL;\ + uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\ + uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\ + uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\ + uint32_t DP_SYM32_ENC_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\ + uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_MSA0;\ + uint32_t DP_SYM32_ENC_VID_MSA1;\ + uint32_t DP_SYM32_ENC_VID_MSA2;\ + uint32_t DP_SYM32_ENC_VID_MSA3;\ + uint32_t DP_SYM32_ENC_VID_MSA4;\ + uint32_t DP_SYM32_ENC_VID_MSA5;\ + uint32_t DP_SYM32_ENC_VID_MSA6;\ + uint32_t DP_SYM32_ENC_VID_MSA7;\ + uint32_t DP_SYM32_ENC_VID_MSA8;\ + uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\ + uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\ + uint32_t DP_SYM32_ENC_VID_CRC_CONTROL + + +#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\ + SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\ + 
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh) + + +#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \ + type DP_STREAM_LINK_TARGET;\ + type DP_STREAM_ENC_CLOCK_EN;\ + type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\ + type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\ + type FIFO_RESET;\ + type FIFO_RESET_DONE;\ + type FIFO_ENABLE;\ + type DP_SYM32_ENC_RESET;\ + type DP_SYM32_ENC_RESET_DONE;\ + type DP_SYM32_ENC_ENABLE;\ + type PIXEL_ENCODING_TYPE;\ + type UNCOMPRESSED_PIXEL_ENCODING;\ + type UNCOMPRESSED_COMPONENT_DEPTH;\ + type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\ + type MSA_DOUBLE_BUFFER_ENABLE;\ + type MSA_DATA_LANE_0;\ + type MSA_DATA_LANE_1;\ + type MSA_DATA_LANE_2;\ + type MSA_DATA_LANE_3;\ + type PIXEL_TO_SYMBOL_FIFO_RESET;\ + type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\ + type PIXEL_TO_SYMBOL_FIFO_ENABLE;\ + type VID_STREAM_ENABLE;\ + type VID_STREAM_STATUS;\ + type VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\ + type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\ + type SDP_STREAM_ENABLE;\ + type AUDIO_MUTE;\ + type ASP_ENABLE;\ + type ATP_ENABLE;\ + type AIP_ENABLE;\ + type ACM_ENABLE;\ + type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\ + type GSP_PAYLOAD_SIZE;\ + type GSP_TRANSMISSION_LINE_NUMBER;\ + type GSP_SOF_REFERENCE;\ + type METADATA_PACKET_ENABLE;\ + type CRC_ENABLE;\ + type CRC_CONT_MODE_ENABLE + + +struct dcn31_hpo_dp_stream_encoder_registers { + DCN3_1_HPO_DP_STREAM_ENC_REGS; +}; + +struct dcn31_hpo_dp_stream_encoder_shift { + DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_hpo_dp_stream_encoder_mask { + DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_hpo_dp_stream_encoder { + struct hpo_dp_stream_encoder base; + const struct dcn31_hpo_dp_stream_encoder_registers *regs; + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift; + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask; +}; + + +void dcn31_hpo_dp_stream_encoder_construct( + struct dcn31_hpo_dp_stream_encoder *enc3, + struct dc_context *ctx, + struct dc_bios *bp, + uint32_t inst, + enum engine_id eng_id, + struct vpg *vpg, + struct apg *apg, + const struct dcn31_hpo_dp_stream_encoder_registers *regs, + const struct 
dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask); + + +#endif // __DAL_DCN31_HPO_STREAM_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 3f2333ec67e2..0713910a3aa9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -48,6 +48,7 @@ #include "dc_link_dp.h" #include "inc/link_dpcd.h" #include "dcn10/dcn10_hw_sequencer.h" +#include "inc/link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -157,6 +158,9 @@ void dcn31_init_hw(struct dc *dc) */ struct dc_link *link = dc->links[i]; + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + continue; + link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ @@ -184,7 +188,8 @@ void dcn31_init_hw(struct dc *dc) &dpcd_power_state, sizeof(dpcd_power_state)); if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { + if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY && + dc->links[i]->link_enc->funcs->get_dig_frontend) { unsigned int fe; fe = dc->links[i]->link_enc->funcs->get_dig_frontend( @@ -194,7 +199,7 @@ void dcn31_init_hw(struct dc *dc) for (j = 0; j < dc->res_pool->stream_enc_count; j++) { if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank( + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], dc->res_pool->stream_enc[j]); break; } @@ -248,7 +253,8 @@ void dcn31_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->link_enc->funcs->is_dig_enabled && + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && dc->hwss.power_down) { dc->hwss.power_down(dc); @@ -420,7 +426,7 @@ void dcn31_z10_save_init(struct dc *dc) dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); } -void dcn31_z10_restore(struct dc *dc) +void dcn31_z10_restore(const struct dc *dc) { union dmub_rb_cmd cmd; @@ -594,19 +600,7 @@ void dcn31_reset_hw_ctx_wrap( old_clk->funcs->cs_power_down(old_clk); } } -} - -bool dcn31_is_abm_supported(struct dc *dc, - struct dc_state *context, struct dc_stream_state *stream) -{ - int i; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == stream && - (pipe_ctx->prev_odm_pipe == NULL && pipe_ctx->next_odm_pipe == NULL)) - return true; - } - return false; + /* New dc_state in the process of being applied to hardware. 
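+ * The link encoder assignment context is flagged LINK_ENC_CFG_TRANSIENT so config queries can tell that a state switch is in flight.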
*/ + dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 140435e4f7ff..7ae45dd202d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -43,7 +43,7 @@ void dcn31_enable_power_gating_plane( void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx); -void dcn31_z10_restore(struct dc *dc); +void dcn31_z10_restore(const struct dc *dc); void dcn31_z10_save_init(struct dc *dc); void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 40011cd3c8ef..c6a737781ad1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn31_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, @@ -93,12 +94,12 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, - .is_abm_supported = dcn31_is_abm_supported, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index a7702d3c75cd..6f0c788d1904 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -52,7 +52,12 @@ #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" @@ -363,7 +368,7 @@ static const struct dce110_clk_src_mask cs_mask = { #define abm_regs(id)\ [id] = {\ - ABM_DCN301_REG_LIST(id)\ + ABM_DCN302_REG_LIST(id)\ } static const struct dce_abm_registers abm_regs[] = { @@ -411,10 +416,10 @@ static const struct dce_audio_mask audio_mask = { #define vpg_regs(id)\ [id] = {\ - VPG_DCN3_REG_LIST(id)\ + VPG_DCN31_REG_LIST(id)\ } -static const struct dcn30_vpg_registers vpg_regs[] = { +static const struct dcn31_vpg_registers vpg_regs[] = { vpg_regs(0), vpg_regs(1), vpg_regs(2), @@ -427,20 +432,20 @@ static const struct dcn30_vpg_registers vpg_regs[] = { vpg_regs(9), }; -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) }; -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) 
+static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) }; #define afmt_regs(id)\ [id] = {\ - AFMT_DCN3_REG_LIST(id)\ + AFMT_DCN31_REG_LIST(id)\ } -static const struct dcn30_afmt_registers afmt_regs[] = { +static const struct dcn31_afmt_registers afmt_regs[] = { afmt_regs(0), afmt_regs(1), afmt_regs(2), @@ -449,12 +454,32 @@ static const struct dcn30_afmt_registers afmt_regs[] = { afmt_regs(5) }; -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) }; -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) }; #define stream_enc_regs(id)\ @@ -538,6 +563,49 @@ static const struct dcn10_link_enc_mask le_mask = { DPCS_DCN31_MASK_SH_LIST(_MASK) }; +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + DCN3_1_RDPCSTX_REG_LIST(3),\ + DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + #define dpp_regs(id)\ [id] = {\ DPP_REG_LIST_DCN30(id),\ @@ -879,6 +947,8 @@ static const struct resource_caps res_cap_dcn31 = { .num_audio = 5, .num_stream_encoder = 5, .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, .num_pll = 5, .num_dwb = 1, .num_ddc = 5, @@ -939,13 +1009,15 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = true, .enable_mem_low_power = { .bits = { - .vga = false, - .i2c = false, + .vga = true, + .i2c = true, .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled - .dscl = false, - .cm = false, - .mpc = false, - .optc = false, + .dscl = true, + .cm = false, // visible flicker on OLED eDPs + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, } }, .optimize_edp_link_rate = true, @@ -1230,34 +1302,57 @@ static struct vpg *dcn31_vpg_create( struct dc_context *ctx, uint32_t inst) { - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + struct dcn31_vpg *vpg31 = 
kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - if (!vpg3) + if (!vpg31) return NULL; - vpg3_construct(vpg3, ctx, inst, + vpg31_construct(vpg31, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); - return &vpg3->base; + // Will re-enable hw block when we enable stream + // Check for enabled stream before powering down? + vpg31_powerdown(&vpg31->base); + + return &vpg31->base; } static struct afmt *dcn31_afmt_create( struct dc_context *ctx, uint32_t inst) { - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - if (!afmt3) + if (!afmt31) return NULL; - afmt3_construct(afmt3, ctx, inst, + afmt31_construct(afmt31, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); - return &afmt3->base; + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; } static struct stream_encoder *dcn31_stream_encoder_create( @@ -1281,8 +1376,12 @@ static struct stream_encoder *dcn31_stream_encoder_create( vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, @@ -1292,6 +1391,72 @@ static struct stream_encoder *dcn31_stream_encoder_create( return &enc1->base; } +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + static struct dce_hwseq 
*dcn31_hwseq_create( struct dc_context *ctx) { @@ -1302,6 +1467,13 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. + * To do so, the call to enable_stream_timing is moved so it is done only AFTER + * the call to core_link_enable_stream + */ + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1309,6 +1481,8 @@ static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn31_create_audio, .create_stream_encoder = dcn31_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, .create_hwseq = dcn31_hwseq_create, }; @@ -1316,6 +1490,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, .create_hwseq = dcn31_hwseq_create, }; @@ -1338,6 +1514,28 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) } } + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); @@ -1647,6 +1845,15 @@ static void dcn31_calculate_wm_and_dlg_fp( if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) dcfclk = context->bw_ctx.dml.soc.min_dcfclk; + /* We don't recalculate clocks for 0 pipe configs, which can block + * S0i3 as high clocks will block low power states + * Override any clocks that can block S0i3 to min here + */ + if (pipe_cnt == 0) { + context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0 + return; + } + pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = dcfclk; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; @@ -1772,6 +1979,58 @@ static void dcn31_calculate_wm_and_dlg( DC_FP_END(); } +bool dcn31_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + if 
(pipe_cnt == 0) + fast_validate = false; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; @@ -1854,7 +2113,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { .link_encs_assign = link_enc_cfg_link_encs_assign, .link_enc_unassign = link_enc_cfg_link_enc_unassign, .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn30_validate_bandwidth, + .validate_bandwidth = dcn31_validate_bandwidth, .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, @@ -1929,6 +2188,7 @@ static bool dcn31_resource_construct( dc->caps.max_slave_rgb_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.dp_hpo = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.is_apu = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c new file mode 100644 index 000000000000..f1deb1c3c363 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c @@ -0,0 +1,87 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn31_vpg.h" +#include "reg_helper.h" +#include "dc/dc.h" + +#define DC_LOGGER \ + vpg31->base.ctx->logger + +#define REG(reg)\ + (vpg31->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + vpg31->vpg_shift->field_name, vpg31->vpg_mask->field_name + + +#define CTX \ + vpg31->base.ctx + +static struct vpg_funcs dcn31_vpg_funcs = { + .update_generic_info_packet = vpg3_update_generic_info_packet, + .vpg_poweron = vpg31_poweron, + .vpg_powerdown = vpg31_powerdown, +}; + +void vpg31_powerdown(struct vpg *vpg) +{ + struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + return; + + REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 0, VPG_GSP_LIGHT_SLEEP_FORCE, 1); +} + +void vpg31_poweron(struct vpg *vpg) +{ + struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + return; + + REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0); +} + +void vpg31_construct(struct dcn31_vpg *vpg31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_vpg_registers *vpg_regs, + const struct dcn31_vpg_shift *vpg_shift, + const struct dcn31_vpg_mask *vpg_mask) +{ + vpg31->base.ctx = ctx; + + vpg31->base.inst = inst; + vpg31->base.funcs = &dcn31_vpg_funcs; + + vpg31->regs = vpg_regs; + vpg31->vpg_shift = vpg_shift; + vpg31->vpg_mask = vpg_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h new file mode 100644 index 000000000000..0e76eabce441 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h @@ -0,0 +1,162 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_VPG_H__ +#define __DAL_DCN31_VPG_H__ + + +#define DCN31_VPG_FROM_VPG(vpg)\ + container_of(vpg, struct dcn31_vpg, base) + +#define VPG_DCN31_REG_LIST(id) \ + SRI(VPG_GENERIC_STATUS, VPG, id), \ + SRI(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \ + SRI(VPG_GENERIC_PACKET_DATA, VPG, id), \ + SRI(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \ + SRI(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \ + SRI(VPG_MEM_PWR, VPG, id) + +struct dcn31_vpg_registers { + uint32_t VPG_GENERIC_STATUS; + uint32_t VPG_GENERIC_PACKET_ACCESS_CTRL; + uint32_t VPG_GENERIC_PACKET_DATA; + uint32_t VPG_GSP_FRAME_UPDATE_CTRL; + uint32_t VPG_GSP_IMMEDIATE_UPDATE_CTRL; + uint32_t VPG_MEM_PWR; +}; + +#define DCN31_VPG_MASK_SH_LIST(mask_sh)\ + SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL, VPG_GENERIC_DATA_INDEX, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE0, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE1, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE2, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE3, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC0_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC1_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC2_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC3_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC5_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC6_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC7_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC8_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC9_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC10_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC11_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC12_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC13_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC14_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC8_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC9_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC10_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC11_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, 
VPG_GENERIC12_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC13_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC14_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, mask_sh),\ + SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_LIGHT_SLEEP_FORCE, mask_sh),\ + SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, mask_sh) + +#define VPG_DCN31_REG_FIELD_LIST(type) \ + type VPG_GENERIC_CONFLICT_OCCURED;\ + type VPG_GENERIC_CONFLICT_CLR;\ + type VPG_GENERIC_DATA_INDEX;\ + type VPG_GENERIC_DATA_BYTE0;\ + type VPG_GENERIC_DATA_BYTE1;\ + type VPG_GENERIC_DATA_BYTE2;\ + type VPG_GENERIC_DATA_BYTE3;\ + type VPG_GENERIC0_FRAME_UPDATE;\ + type VPG_GENERIC1_FRAME_UPDATE;\ + type VPG_GENERIC2_FRAME_UPDATE;\ + type VPG_GENERIC3_FRAME_UPDATE;\ + type VPG_GENERIC4_FRAME_UPDATE;\ + type VPG_GENERIC5_FRAME_UPDATE;\ + type VPG_GENERIC6_FRAME_UPDATE;\ + type VPG_GENERIC7_FRAME_UPDATE;\ + type VPG_GENERIC8_FRAME_UPDATE;\ + type VPG_GENERIC9_FRAME_UPDATE;\ + type VPG_GENERIC10_FRAME_UPDATE;\ + type VPG_GENERIC11_FRAME_UPDATE;\ + type VPG_GENERIC12_FRAME_UPDATE;\ + type VPG_GENERIC13_FRAME_UPDATE;\ + type VPG_GENERIC14_FRAME_UPDATE;\ + type VPG_GENERIC0_IMMEDIATE_UPDATE;\ + type VPG_GENERIC1_IMMEDIATE_UPDATE;\ + type VPG_GENERIC2_IMMEDIATE_UPDATE;\ + type VPG_GENERIC3_IMMEDIATE_UPDATE;\ + type VPG_GENERIC4_IMMEDIATE_UPDATE;\ + type VPG_GENERIC5_IMMEDIATE_UPDATE;\ + type VPG_GENERIC6_IMMEDIATE_UPDATE;\ + type VPG_GENERIC7_IMMEDIATE_UPDATE;\ + type VPG_GENERIC8_IMMEDIATE_UPDATE;\ + type VPG_GENERIC9_IMMEDIATE_UPDATE;\ + type VPG_GENERIC10_IMMEDIATE_UPDATE;\ + type VPG_GENERIC11_IMMEDIATE_UPDATE;\ + type VPG_GENERIC12_IMMEDIATE_UPDATE;\ + type VPG_GENERIC13_IMMEDIATE_UPDATE;\ + type VPG_GENERIC14_IMMEDIATE_UPDATE;\ + type VPG_GSP_MEM_LIGHT_SLEEP_DIS;\ + type VPG_GSP_LIGHT_SLEEP_FORCE;\ + type VPG_GSP_MEM_PWR_STATE + +struct dcn31_vpg_shift { + VPG_DCN31_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_vpg_mask { + VPG_DCN31_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_vpg { + struct vpg base; + const struct dcn31_vpg_registers *regs; + const struct dcn31_vpg_shift *vpg_shift; + const struct dcn31_vpg_mask *vpg_mask; +}; + +void vpg31_poweron( + struct vpg *vpg); + +void vpg31_powerdown( + struct vpg *vpg); + +void vpg31_construct(struct dcn31_vpg *vpg31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_vpg_registers *vpg_regs, + const struct dcn31_vpg_shift *vpg_shift, + const struct dcn31_vpg_mask *vpg_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index a9170b9f84d3..43f33e186088 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -37,6 +37,7 @@ struct cp_psp_stream_config { uint8_t phy_idx; uint8_t assr_enabled; uint8_t mst_enabled; + uint8_t dp2_enabled; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 9ab854293ace..3a905fb667bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -160,6 +160,12 @@ void dm_set_dcn_clocks( struct dc_context *ctx, struct dc_clocks *clks); +#if defined(CONFIG_DRM_AMD_DC_DCN) +void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable); +#endif + +void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz); + bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable); 
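[Editor's note] The dcn31_vpg.h interface above follows DC's shift/mask convention: VPG_DCN31_REG_FIELD_LIST expands once into a uint8_t shift table (struct dcn31_vpg_shift) and once into a uint32_t mask table (struct dcn31_vpg_mask), and the FN macro in dcn31_vpg.c pairs the two at each REG_UPDATE_2 call site. A minimal, self-contained sketch of the read-modify-write those helpers perform; the names below are illustrative, not the actual DC macros:

#include <stdint.h>

/* Illustrative stand-in for one REG_UPDATE field write. */
uint32_t field_update(uint32_t reg_val, uint8_t shift,
		      uint32_t mask, uint32_t field_val)
{
	reg_val &= ~mask;			/* clear the old field */
	reg_val |= (field_val << shift) & mask;	/* merge in the new value */
	return reg_val;
}

/* vpg31_powerdown() effectively performs two such updates on VPG_MEM_PWR:
 * VPG_GSP_MEM_LIGHT_SLEEP_DIS <- 0 and VPG_GSP_LIGHT_SLEEP_FORCE <- 1.
 */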
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 2091dd8c252d..246071c72f6b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -37,8 +37,8 @@ // static void dml20_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, - display_rq_params_st * rq_param, - const display_pipe_source_params_st pipe_src_param); + display_rq_params_st *rq_param, + const display_pipe_source_params_st *pipe_src_param); // Function: dml20_rq_dlg_get_dlg_params // Calculate deadline related parameters @@ -49,8 +49,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en); /* @@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - 
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), @@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_source_params_st pipe_src_param, + const display_pipe_source_params_st *pipe_src_param, bool is_chroma) { bool mode_422 = false; @@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { - vp_width = pipe_src_param.viewport_width_c / ppe; - vp_height = pipe_src_param.viewport_height_c; - data_pitch = pipe_src_param.data_pitch_c; - meta_pitch = pipe_src_param.meta_pitch_c; + vp_width = pipe_src_param->viewport_width_c / ppe; + vp_height = pipe_src_param->viewport_height_c; + data_pitch = pipe_src_param->data_pitch_c; + meta_pitch = pipe_src_param->meta_pitch_c; } else { - vp_width = pipe_src_param.viewport_width / ppe; - vp_height = pipe_src_param.viewport_height; 
- data_pitch = pipe_src_param.data_pitch; - meta_pitch = pipe_src_param.meta_pitch; + vp_width = pipe_src_param->viewport_width / ppe; + vp_height = pipe_src_param->viewport_height; + data_pitch = pipe_src_param->data_pitch; + meta_pitch = pipe_src_param->meta_pitch; } rq_sizing_param->chunk_bytes = 8192; @@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, vp_height, data_pitch, meta_pitch, - pipe_src_param.source_format, - pipe_src_param.sw_mode, - pipe_src_param.macro_tile_size, - pipe_src_param.source_scan, + pipe_src_param->source_format, + pipe_src_param->sw_mode, + pipe_src_param->macro_tile_size, + pipe_src_param->source_scan, is_chroma); } static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { // get param for luma surface - rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 - || pipe_src_param.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + rq_param->yuv420 = pipe_src_param->source_format == dm_420_8 + || pipe_src_param->source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), @@ -751,7 +751,7 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, pipe_src_param, 0); - if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) { + if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), @@ -763,20 +763,20 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, // calculate how to split the det buffer space between luma and chroma handle_det_buf_split(mode_lib, rq_param, pipe_src_param); - print__rq_params_st(mode_lib, *rq_param); + print__rq_params_st(mode_lib, rq_param); } void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); - dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); - extract_rq_regs(mode_lib, rq_regs, rq_param); + dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } // Note: currently taken in as is. 
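[Editor's note] The dml20 hunks above are a mechanical conversion of the large display_* parameter structs from pass-by-value to pass-by-const-pointer, so each call no longer copies hundreds of bytes onto the small kernel stack. A short sketch of the pattern with a stand-in struct (the real DML structs are far larger):

/* Stand-in payload; display_pipe_params_st is much bigger in practice. */
struct big_params {
	unsigned int viewport_width;
	unsigned int viewport_height;
	unsigned int bulk[256];	/* hypothetical filler to make the copy cost visible */
};

/* Before: the entire struct is copied onto the callee's stack per call. */
unsigned int area_by_value(const struct big_params p)
{
	return p.viewport_width * p.viewport_height;
}

/* After: only a pointer is passed; const preserves the read-only contract. */
unsigned int area_by_ptr(const struct big_params *p)
{
	return p->viewport_width * p->viewport_height;
}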
@@ -787,8 +787,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en) { @@ -935,7 +935,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, * (double) ref_freq_to_pix_freq); ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); @@ -995,20 +995,20 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // vinit_bot_l = scl.vinit_bot; // vinit_bot_c = scl.vinit_bot_c; -// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; -// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; -// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; -// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; +// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; +// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; +// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub; +// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub; -// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; +// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1137,16 +1137,16 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param->rq_c.meta_row_height; 
swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1542,14 +1542,14 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1579,20 +1579,20 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src); + dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src); dml20_rq_dlg_get_dlg_params(mode_lib, e2e_pipe_param, num_pipes, pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en); dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h index d0b90947f540..8b23867e97c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h @@ -43,7 +43,7 @@ struct display_mode_lib; void dml20_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg @@ -61,7 +61,7 @@ void dml20_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 1a0c14e465fa..015e7f2c0b16 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -38,7 +38,7 @@ static void dml20v2_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, display_rq_params_st * rq_param, - const display_pipe_source_params_st pipe_src_param); + const display_pipe_source_params_st *pipe_src_param); // Function: dml20v2_rq_dlg_get_dlg_params // 
Calculate deadline related parameters @@ -49,8 +49,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en); /* @@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // TODO: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), @@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_source_params_st pipe_src_param, + const display_pipe_source_params_st *pipe_src_param, bool is_chroma) { bool mode_422 = false; @@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { - vp_width = pipe_src_param.viewport_width_c / ppe; - vp_height = pipe_src_param.viewport_height_c; - data_pitch = pipe_src_param.data_pitch_c; - meta_pitch = pipe_src_param.meta_pitch_c; + vp_width = pipe_src_param->viewport_width_c / ppe; + vp_height = pipe_src_param->viewport_height_c; + data_pitch = pipe_src_param->data_pitch_c; + meta_pitch = pipe_src_param->meta_pitch_c; } else { - vp_width = pipe_src_param.viewport_width / ppe; - vp_height = pipe_src_param.viewport_height; - data_pitch = pipe_src_param.data_pitch; - meta_pitch = pipe_src_param.meta_pitch; + vp_width = pipe_src_param->viewport_width / ppe; + vp_height = pipe_src_param->viewport_height; + data_pitch = pipe_src_param->data_pitch; + meta_pitch = pipe_src_param->meta_pitch; } rq_sizing_param->chunk_bytes = 8192; @@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, vp_height, data_pitch, meta_pitch, - pipe_src_param.source_format, - pipe_src_param.sw_mode, - pipe_src_param.macro_tile_size, - pipe_src_param.source_scan, + pipe_src_param->source_format, + pipe_src_param->sw_mode, + pipe_src_param->macro_tile_size, + pipe_src_param->source_scan, 
is_chroma); } static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { // get param for luma surface - rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 - || pipe_src_param.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + rq_param->yuv420 = pipe_src_param->source_format == dm_420_8 + || pipe_src_param->source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), @@ -751,7 +751,7 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, pipe_src_param, 0); - if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) { + if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), @@ -763,20 +763,20 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, // calculate how to split the det buffer space between luma and chroma handle_det_buf_split(mode_lib, rq_param, pipe_src_param); - print__rq_params_st(mode_lib, *rq_param); + print__rq_params_st(mode_lib, rq_param); } void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); - dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); - extract_rq_regs(mode_lib, rq_regs, rq_param); + dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } // Note: currently taken in as is. 
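[Editor's note] As in the dml20 variant, extract_rq_sizing_regs() encodes sizing fields as log2 of a byte count minus a fixed base: chunk_size = dml_log2(chunk_bytes) - 10, so the 8 KiB chunk_bytes set in get_surf_rq_param() encodes as 13 - 10 = 3. A tiny standalone check of that arithmetic, assuming power-of-two inputs as this code does:

/* Illustrative integer log2 for power-of-two inputs, standing in for dml_log2. */
unsigned int ilog2_pow2(unsigned int v)
{
	unsigned int r = 0;

	while (v > 1) {
		v >>= 1;
		r++;
	}
	return r;
}

/* chunk_bytes = 8192 -> ilog2_pow2(8192) = 13 -> chunk_size field = 13 - 10 = 3 */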
@@ -787,8 +787,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en) { @@ -935,7 +935,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, * (double) ref_freq_to_pix_freq); ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); @@ -996,20 +996,20 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // vinit_bot_l = scl.vinit_bot; // vinit_bot_c = scl.vinit_bot_c; -// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; -// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; -// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; -// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; +// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; +// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; +// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub; +// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub; -// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; +// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1138,16 +1138,16 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = 
rq_dlg_param->rq_c.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1543,14 +1543,14 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1580,20 +1580,20 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src); + dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src); dml20v2_rq_dlg_get_dlg_params(mode_lib, e2e_pipe_param, num_pipes, pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en); dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h index 27cf8bed9376..2b4e46ea1c3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h @@ -43,7 +43,7 @@ struct display_mode_lib; void dml20v2_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg @@ -61,7 +61,7 @@ void dml20v2_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 4136eb8256cb..8a7485e21d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -3394,6 +3394,127 @@ static unsigned int TruncToValidBPP( } } + +static noinline void CalculatePrefetchSchedulePerPlane( + struct display_mode_lib *mode_lib, + int i, + unsigned j, + unsigned k) +{ + struct vba_vars_st *locals = &mode_lib->vba; + 
Pipe myPipe; + HostVM myHostVM; + + if (mode_lib->vba.XFCEnabled[k] == true) { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = + CalculateRemoteSurfaceFlipDelay( + mode_lib, + mode_lib->vba.VRatio[k], + locals->SwathWidthYThisState[k], + dml_ceil(locals->BytePerPixelInDETY[k], 1.0), + mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], + mode_lib->vba.XFCTSlvVupdateOffset, + mode_lib->vba.XFCTSlvVupdateWidth, + mode_lib->vba.XFCTSlvVreadyOffset, + mode_lib->vba.XFCXBUFLatencyTolerance, + mode_lib->vba.XFCFillBWOverhead, + mode_lib->vba.XFCSlvChunkSize, + mode_lib->vba.XFCBusTransportTime, + mode_lib->vba.TimeCalc, + mode_lib->vba.TWait, + &mode_lib->vba.SrcActiveDrainRate, + &mode_lib->vba.TInitXFill, + &mode_lib->vba.TslvChk); + } else { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; + } + + myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k]; + myPipe.DISPCLK = locals->RequiredDISPCLK[i][j]; + myPipe.PixelClock = mode_lib->vba.PixelClock[k]; + myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; + myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k]; + myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k]; + myPipe.SourceScan = mode_lib->vba.SourceScan[k]; + myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k]; + myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k]; + myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k]; + myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k]; + myPipe.InterlaceEnable = mode_lib->vba.Interlace[k]; + myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k]; + myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]; + myPipe.HTotal = mode_lib->vba.HTotal[k]; + + + myHostVM.Enable = mode_lib->vba.HostVMEnable; + myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels; + myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels; + + + mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule( + mode_lib, + mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, + mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, + &myPipe, + locals->DSCDelayPerState[i][k], + mode_lib->vba.DPPCLKDelaySubtotal, + mode_lib->vba.DPPCLKDelaySCL, + mode_lib->vba.DPPCLKDelaySCLLBOnly, + mode_lib->vba.DPPCLKDelayCNVCFormater, + mode_lib->vba.DPPCLKDelayCNVCCursor, + mode_lib->vba.DISPCLKDelaySubtotal, + locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.MaxInterDCNTileRepeaters, + dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), + locals->MaximumVStartup[0][0][k], + mode_lib->vba.GPUVMMaxPageTableLevels, + mode_lib->vba.GPUVMEnable, + &myHostVM, + mode_lib->vba.DynamicMetadataEnable[k], + mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], + mode_lib->vba.DynamicMetadataTransmittedBytes[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.UrgentLatency, + mode_lib->vba.ExtraLatency, + mode_lib->vba.TimeCalc, + locals->PDEAndMetaPTEBytesPerFrame[0][0][k], + locals->MetaRowBytes[0][0][k], + locals->DPTEBytesPerRow[0][0][k], + locals->PrefetchLinesY[0][0][k], + locals->SwathWidthYThisState[k], + locals->BytePerPixelInDETY[k], + locals->PrefillY[k], + locals->MaxNumSwY[k], + locals->PrefetchLinesC[0][0][k], + locals->BytePerPixelInDETC[k], + locals->PrefillC[k], + locals->MaxNumSwC[k], + locals->SwathHeightYThisState[k], + locals->SwathHeightCThisState[k], + mode_lib->vba.TWait, + mode_lib->vba.XFCEnabled[k], + mode_lib->vba.XFCRemoteSurfaceFlipDelay, 
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + &locals->dst_x_after_scaler, + &locals->dst_y_after_scaler, + &locals->LineTimesForPrefetch[k], + &locals->PrefetchBW[k], + &locals->LinesForMetaPTE[k], + &locals->LinesForMetaAndDPTERow[k], + &locals->VRatioPreY[i][j][k], + &locals->VRatioPreC[i][j][k], + &locals->RequiredPrefetchPixelDataBWLuma[i][j][k], + &locals->RequiredPrefetchPixelDataBWChroma[i][j][k], + &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata, + &locals->Tno_bw[k], + &locals->prefetch_vmrow_bw[k], + locals->swath_width_luma_ub, + locals->swath_width_chroma_ub, + &mode_lib->vba.VUpdateOffsetPix[k], + &mode_lib->vba.VUpdateWidthPix[k], + &mode_lib->vba.VReadyOffsetPix[k]); +} void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *locals = &mode_lib->vba; @@ -4676,120 +4797,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMClockChangeLatency, mode_lib->vba.UrgentLatency, mode_lib->vba.SREnterPlusExitTime); - for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - Pipe myPipe; - HostVM myHostVM; - - if (mode_lib->vba.XFCEnabled[k] == true) { - mode_lib->vba.XFCRemoteSurfaceFlipDelay = - CalculateRemoteSurfaceFlipDelay( - mode_lib, - mode_lib->vba.VRatio[k], - locals->SwathWidthYThisState[k], - dml_ceil(locals->BytePerPixelInDETY[k], 1.0), - mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], - mode_lib->vba.XFCTSlvVupdateOffset, - mode_lib->vba.XFCTSlvVupdateWidth, - mode_lib->vba.XFCTSlvVreadyOffset, - mode_lib->vba.XFCXBUFLatencyTolerance, - mode_lib->vba.XFCFillBWOverhead, - mode_lib->vba.XFCSlvChunkSize, - mode_lib->vba.XFCBusTransportTime, - mode_lib->vba.TimeCalc, - mode_lib->vba.TWait, - &mode_lib->vba.SrcActiveDrainRate, - &mode_lib->vba.TInitXFill, - &mode_lib->vba.TslvChk); - } else { - mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; - } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) + CalculatePrefetchSchedulePerPlane(mode_lib, i, j, k); - myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k]; - myPipe.DISPCLK = locals->RequiredDISPCLK[i][j]; - myPipe.PixelClock = mode_lib->vba.PixelClock[k]; - myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; - myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k]; - myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k]; - myPipe.SourceScan = mode_lib->vba.SourceScan[k]; - myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k]; - myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k]; - myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k]; - myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k]; - myPipe.InterlaceEnable = mode_lib->vba.Interlace[k]; - myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k]; - myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]; - myPipe.HTotal = mode_lib->vba.HTotal[k]; - - - myHostVM.Enable = mode_lib->vba.HostVMEnable; - myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels; - myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels; - - - mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule( - mode_lib, - mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, - mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, - &myPipe, - locals->DSCDelayPerState[i][k], - mode_lib->vba.DPPCLKDelaySubtotal, - mode_lib->vba.DPPCLKDelaySCL, - mode_lib->vba.DPPCLKDelaySCLLBOnly, - 
mode_lib->vba.DPPCLKDelayCNVCFormater, - mode_lib->vba.DPPCLKDelayCNVCCursor, - mode_lib->vba.DISPCLKDelaySubtotal, - locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k], - mode_lib->vba.OutputFormat[k], - mode_lib->vba.MaxInterDCNTileRepeaters, - dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), - locals->MaximumVStartup[0][0][k], - mode_lib->vba.GPUVMMaxPageTableLevels, - mode_lib->vba.GPUVMEnable, - &myHostVM, - mode_lib->vba.DynamicMetadataEnable[k], - mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], - mode_lib->vba.DynamicMetadataTransmittedBytes[k], - mode_lib->vba.DCCEnable[k], - mode_lib->vba.UrgentLatency, - mode_lib->vba.ExtraLatency, - mode_lib->vba.TimeCalc, - locals->PDEAndMetaPTEBytesPerFrame[0][0][k], - locals->MetaRowBytes[0][0][k], - locals->DPTEBytesPerRow[0][0][k], - locals->PrefetchLinesY[0][0][k], - locals->SwathWidthYThisState[k], - locals->BytePerPixelInDETY[k], - locals->PrefillY[k], - locals->MaxNumSwY[k], - locals->PrefetchLinesC[0][0][k], - locals->BytePerPixelInDETC[k], - locals->PrefillC[k], - locals->MaxNumSwC[k], - locals->SwathHeightYThisState[k], - locals->SwathHeightCThisState[k], - mode_lib->vba.TWait, - mode_lib->vba.XFCEnabled[k], - mode_lib->vba.XFCRemoteSurfaceFlipDelay, - mode_lib->vba.ProgressiveToInterlaceUnitInOPP, - &locals->dst_x_after_scaler, - &locals->dst_y_after_scaler, - &locals->LineTimesForPrefetch[k], - &locals->PrefetchBW[k], - &locals->LinesForMetaPTE[k], - &locals->LinesForMetaAndDPTERow[k], - &locals->VRatioPreY[i][j][k], - &locals->VRatioPreC[i][j][k], - &locals->RequiredPrefetchPixelDataBWLuma[i][j][k], - &locals->RequiredPrefetchPixelDataBWChroma[i][j][k], - &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata, - &locals->Tno_bw[k], - &locals->prefetch_vmrow_bw[k], - locals->swath_width_luma_ub, - locals->swath_width_chroma_ub, - &mode_lib->vba.VUpdateOffsetPix[k], - &mode_lib->vba.VUpdateWidthPix[k], - &mode_lib->vba.VReadyOffsetPix[k]); - } mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0; mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 287e31052b30..46c433c0bcb0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -141,55 +141,55 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs( struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 
0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); rq_regs->rq_regs_l.pte_row_height_linear = dml_floor( - dml_log2(rq_param.dlg.rq_l.dpte_row_height), + dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); rq_regs->rq_regs_c.pte_row_height_linear = dml_floor( - dml_log2(rq_param.dlg.rq_c.dpte_row_height), + dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // FIXME: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -198,9 +198,9 @@ static void extract_rq_regs( rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple( @@ -215,7 +215,7 @@ static void extract_rq_regs( static void handle_det_buf_split( struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -224,8 +224,8 @@ static void handle_det_buf_split( unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -694,7 +694,7 @@ static void get_surf_rq_param( display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_params_st pipe_param, + const display_pipe_params_st *pipe_param, bool is_chroma) { bool mode_422 = false; @@ -706,30 +706,30 @@ static void get_surf_rq_param( // FIXME check if ppe apply for both luma and chroma in 422 case if (is_chroma) { - vp_width = pipe_param.src.viewport_width_c / ppe; - vp_height = pipe_param.src.viewport_height_c; - data_pitch = pipe_param.src.data_pitch_c; - meta_pitch = pipe_param.src.meta_pitch_c; + vp_width = pipe_param->src.viewport_width_c / ppe; + vp_height = pipe_param->src.viewport_height_c; + data_pitch = pipe_param->src.data_pitch_c; + meta_pitch = pipe_param->src.meta_pitch_c; } else { - vp_width = pipe_param.src.viewport_width / ppe; - vp_height = pipe_param.src.viewport_height; - data_pitch = pipe_param.src.data_pitch; - meta_pitch = pipe_param.src.meta_pitch; + vp_width = pipe_param->src.viewport_width / ppe; + vp_height = pipe_param->src.viewport_height; + data_pitch = pipe_param->src.data_pitch; + meta_pitch = pipe_param->src.meta_pitch; } - if (pipe_param.dest.odm_combine) { + if (pipe_param->dest.odm_combine) { unsigned int access_dir; unsigned int full_src_vp_width; unsigned int hactive_half; unsigned int src_hactive_half; - access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed - hactive_half = pipe_param.dest.hactive / 2; + access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed + hactive_half = pipe_param->dest.hactive / 2; if (is_chroma) { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; - src_hactive_half = 
pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; + src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half; } else { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; - src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; + src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half; } if (access_dir == 0) { @@ -754,7 +754,7 @@ static void get_surf_rq_param( rq_sizing_param->meta_chunk_bytes = 2048; rq_sizing_param->min_meta_chunk_bytes = 256; - if (pipe_param.src.hostvm) + if (pipe_param->src.hostvm) rq_sizing_param->mpte_group_bytes = 512; else rq_sizing_param->mpte_group_bytes = 2048; @@ -768,23 +768,23 @@ static void get_surf_rq_param( vp_height, data_pitch, meta_pitch, - pipe_param.src.source_format, - pipe_param.src.sw_mode, - pipe_param.src.macro_tile_size, - pipe_param.src.source_scan, - pipe_param.src.hostvm, + pipe_param->src.source_format, + pipe_param->src.sw_mode, + pipe_param->src.macro_tile_size, + pipe_param->src.source_scan, + pipe_param->src.hostvm, is_chroma); } static void dml_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { // get param for luma surface - rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 - || pipe_param.src.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; + rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 + || pipe_param->src.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; get_surf_rq_param( mode_lib, @@ -794,7 +794,7 @@ static void dml_rq_dlg_get_rq_params( pipe_param, 0); - if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) { + if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) { // get param for chroma surface get_surf_rq_param( mode_lib, @@ -806,22 +806,22 @@ static void dml_rq_dlg_get_rq_params( } // calculate how to split the det buffer space between luma and chroma - handle_det_buf_split(mode_lib, rq_param, pipe_param.src); - print__rq_params_st(mode_lib, *rq_param); + handle_det_buf_split(mode_lib, rq_param, &pipe_param->src); + print__rq_params_st(mode_lib, rq_param); } void dml21_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param); - extract_rq_regs(mode_lib, rq_regs, rq_param); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } // Note: currently taken in as is. 
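The dcn21 hunks above all follow one pattern: display_rq_params_st, display_pipe_params_st and the other DML parameter aggregates stop being passed by value and become const pointers. These structs are large, so each by-value call copied the whole aggregate into the callee's stack frame, and the deeply nested DML call chains multiply that cost; a const pointer keeps the read-only contract while passing only an address. A minimal sketch of the pattern, with a hypothetical struct big_params standing in for the real DML types (the size is illustrative only):

#include <stdio.h>

/* Hypothetical stand-in for a large DML parameter aggregate; the real
 * structs (display_pipe_params_st and friends) are similarly bulky. */
struct big_params {
	unsigned int fields[256];	/* ~1 KiB of payload */
};

/* By value: the whole struct is copied into this frame on every call. */
static unsigned int sum_by_value(struct big_params p)
{
	unsigned int s = 0;

	for (unsigned int i = 0; i < 256; i++)
		s += p.fields[i];
	return s;
}

/* By const pointer: only an address crosses the call, and 'const'
 * preserves the caller's guarantee that the callee will not write it. */
static unsigned int sum_by_ptr(const struct big_params *p)
{
	unsigned int s = 0;

	for (unsigned int i = 0; i < 256; i++)
		s += p->fields[i];
	return s;
}

int main(void)
{
	struct big_params p = { .fields = { 1, 2, 3 } };

	/* Same result either way; the pointer version just skips the copy. */
	printf("%u %u\n", sum_by_value(p), sum_by_ptr(&p));
	return 0;
}

The same reasoning covers the print__*_st helper signatures changed later in this patch: they only read the structs, so taking a const pointer is strictly cheaper.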
@@ -833,8 +833,8 @@ static void dml_rq_dlg_get_dlg_params( const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en) { @@ -981,7 +981,7 @@ static void dml_rq_dlg_get_dlg_params( * (double) ref_freq_to_pix_freq); ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13)); - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); @@ -1042,13 +1042,13 @@ static void dml_rq_dlg_get_dlg_params( scl_enable = scl->scl_enable; line_time_in_us = (htotal / pclk_freq_in_mhz); - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1189,16 +1189,16 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param->rq_c.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1650,15 +1650,15 @@ static void dml_rq_dlg_get_dlg_params( disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml21_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, 
const unsigned int pipe_idx, const bool cstate_en, @@ -1691,12 +1691,12 @@ void dml21_rq_dlg_get_dlg_reg( dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); + dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); dml_rq_dlg_get_dlg_params( mode_lib, e2e_pipe_param, @@ -1704,8 +1704,8 @@ void dml21_rq_dlg_get_dlg_reg( pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en); dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h index e8f7785e3fc6..af6ad0ca9cf8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h @@ -44,7 +44,7 @@ struct display_mode_lib; void dml21_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg // Calculate and return DLG and TTU register struct given the system setting @@ -61,7 +61,7 @@ void dml21_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index 0d934fae1c3a..aef854270054 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -89,52 +89,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - 
rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // FIXME: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -143,9 +143,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double)rq_param.misc.rq_l.stored_swath_bytes - / (double)rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double)rq_param->misc.rq_l.stored_swath_bytes + / (double)rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), @@ -158,7 +158,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -167,8 +167,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int 
log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -747,7 +747,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_params_st pipe_param, + const display_pipe_params_st *pipe_param, bool is_chroma, bool is_alpha) { @@ -761,32 +761,32 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, // FIXME check if ppe apply for both luma and chroma in 422 case if (is_chroma | is_alpha) { - vp_width = pipe_param.src.viewport_width_c / ppe; - vp_height = pipe_param.src.viewport_height_c; - data_pitch = pipe_param.src.data_pitch_c; - meta_pitch = pipe_param.src.meta_pitch_c; - surface_height = pipe_param.src.surface_height_y / 2.0; + vp_width = pipe_param->src.viewport_width_c / ppe; + vp_height = pipe_param->src.viewport_height_c; + data_pitch = pipe_param->src.data_pitch_c; + meta_pitch = pipe_param->src.meta_pitch_c; + surface_height = pipe_param->src.surface_height_y / 2.0; } else { - vp_width = pipe_param.src.viewport_width / ppe; - vp_height = pipe_param.src.viewport_height; - data_pitch = pipe_param.src.data_pitch; - meta_pitch = pipe_param.src.meta_pitch; - surface_height = pipe_param.src.surface_height_y; + vp_width = pipe_param->src.viewport_width / ppe; + vp_height = pipe_param->src.viewport_height; + data_pitch = pipe_param->src.data_pitch; + meta_pitch = pipe_param->src.meta_pitch; + surface_height = pipe_param->src.surface_height_y; } - if (pipe_param.dest.odm_combine) { + if (pipe_param->dest.odm_combine) { unsigned int access_dir = 0; unsigned int full_src_vp_width = 0; unsigned int hactive_odm = 0; unsigned int src_hactive_odm = 0; - access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed - hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2); + access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed + hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2); if (is_chroma) { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm; } else { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm; } if (access_dir == 0) { @@ -815,7 +815,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, rq_sizing_param->meta_chunk_bytes = 2048; rq_sizing_param->min_meta_chunk_bytes = 256; - if (pipe_param.src.hostvm) + if (pipe_param->src.hostvm) rq_sizing_param->mpte_group_bytes = 512; else rq_sizing_param->mpte_group_bytes = 2048; @@ -828,28 +828,28 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, vp_height, data_pitch, meta_pitch, - pipe_param.src.source_format, - pipe_param.src.sw_mode, - pipe_param.src.macro_tile_size, - pipe_param.src.source_scan, - 
pipe_param.src.hostvm, + pipe_param->src.source_format, + pipe_param->src.sw_mode, + pipe_param->src.macro_tile_size, + pipe_param->src.source_scan, + pipe_param->src.hostvm, is_chroma, surface_height); } static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { // get param for luma surface - rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 - || pipe_param.src.source_format == dm_420_10 - || pipe_param.src.source_format == dm_rgbe_alpha - || pipe_param.src.source_format == dm_420_12; + rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 + || pipe_param->src.source_format == dm_420_10 + || pipe_param->src.source_format == dm_rgbe_alpha + || pipe_param->src.source_format == dm_420_12; - rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0; + rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha)?1:0; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), @@ -859,7 +859,7 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, 0, 0); - if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) { + if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), @@ -871,21 +871,21 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, } // calculate how to split the det buffer space between luma and chroma - handle_det_buf_split(mode_lib, rq_param, pipe_param.src); - print__rq_params_st(mode_lib, *rq_param); + handle_det_buf_split(mode_lib, rq_param, &pipe_param->src); + print__rq_params_st(mode_lib, rq_param); } void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = { 0 }; memset(rq_regs, 0, sizeof(*rq_regs)); dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param); - extract_rq_regs(mode_lib, rq_regs, rq_param); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, @@ -1824,14 +1824,14 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1861,12 +1861,12 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter 
calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); + dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); dml_rq_dlg_get_dlg_params(mode_lib, e2e_pipe_param, num_pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h index c04965cceff3..625e41f8d575 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h @@ -41,7 +41,7 @@ struct display_mode_lib; // See also: <display_rq_regs_st> void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg // Calculate and return DLG and TTU register struct given the system setting @@ -57,7 +57,7 @@ void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index c23905bc733a..e0fecf127bd5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -175,47 +175,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si return (4 * 1024); } -static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st rq_sizing) +static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st *rq_sizing) { print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } -static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st rq_param) +static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, 
const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), 1) - 3; + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), 1) - 3; + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // FIXME: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -225,8 +225,8 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s rq_regs->crq_expansion_mode = 1; // Note: detile_buf_plane1_addr is in unit of 1KB - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma #ifdef __DML_RQ_DLG_CALC_DEBUG__ dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr); @@ -244,14 +244,14 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes); dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d\n", __func__, detile_buf_plane1_addr); dml_print("DML_DLG: %s: plane1_base_address = %0d\n", __func__, rq_regs->plane1_base_address); - dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_l.stored_swath_bytes); - dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_c.stored_swath_bytes); - dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param.dlg.rq_l.swath_height); - dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param.dlg.rq_c.swath_height); + dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes); + dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes); + dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, 
rq_param->dlg.rq_l.swath_height); + dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param->dlg.rq_c.swath_height); #endif } -static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st pipe_src_param) +static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -260,8 +260,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_p unsigned int full_swath_bytes_packed_c = 0; bool req128_l = 0; bool req128_c = 0; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -738,7 +738,7 @@ static void get_surf_rq_param( display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_params_st pipe_param, + const display_pipe_params_st *pipe_param, bool is_chroma, bool is_alpha) { @@ -752,33 +752,33 @@ static void get_surf_rq_param( // FIXME check if ppe apply for both luma and chroma in 422 case if (is_chroma | is_alpha) { - vp_width = pipe_param.src.viewport_width_c / ppe; - vp_height = pipe_param.src.viewport_height_c; - data_pitch = pipe_param.src.data_pitch_c; - meta_pitch = pipe_param.src.meta_pitch_c; - surface_height = pipe_param.src.surface_height_y / 2.0; + vp_width = pipe_param->src.viewport_width_c / ppe; + vp_height = pipe_param->src.viewport_height_c; + data_pitch = pipe_param->src.data_pitch_c; + meta_pitch = pipe_param->src.meta_pitch_c; + surface_height = pipe_param->src.surface_height_y / 2.0; } else { - vp_width = pipe_param.src.viewport_width / ppe; - vp_height = pipe_param.src.viewport_height; - data_pitch = pipe_param.src.data_pitch; - meta_pitch = pipe_param.src.meta_pitch; - surface_height = pipe_param.src.surface_height_y; + vp_width = pipe_param->src.viewport_width / ppe; + vp_height = pipe_param->src.viewport_height; + data_pitch = pipe_param->src.data_pitch; + meta_pitch = pipe_param->src.meta_pitch; + surface_height = pipe_param->src.surface_height_y; } - if (pipe_param.dest.odm_combine) { + if (pipe_param->dest.odm_combine) { unsigned int access_dir; unsigned int full_src_vp_width; unsigned int hactive_odm; unsigned int src_hactive_odm; - access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed - hactive_odm = pipe_param.dest.hactive / ((unsigned int) pipe_param.dest.odm_combine * 2); + access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed + hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2); if (is_chroma) { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm; } else { - full_src_vp_width = 
pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm; } if (access_dir == 0) { @@ -808,7 +808,7 @@ static void get_surf_rq_param( rq_sizing_param->meta_chunk_bytes = 2048; rq_sizing_param->min_meta_chunk_bytes = 256; - if (pipe_param.src.hostvm) + if (pipe_param->src.hostvm) rq_sizing_param->mpte_group_bytes = 512; else rq_sizing_param->mpte_group_bytes = 2048; @@ -822,46 +822,46 @@ static void get_surf_rq_param( vp_height, data_pitch, meta_pitch, - pipe_param.src.source_format, - pipe_param.src.sw_mode, - pipe_param.src.macro_tile_size, - pipe_param.src.source_scan, - pipe_param.src.hostvm, + pipe_param->src.source_format, + pipe_param->src.sw_mode, + pipe_param->src.macro_tile_size, + pipe_param->src.source_scan, + pipe_param->src.hostvm, is_chroma, surface_height); } -static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st pipe_param) +static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param) { // get param for luma surface - rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 || pipe_param.src.source_format == dm_420_10 || pipe_param.src.source_format == dm_rgbe_alpha - || pipe_param.src.source_format == dm_420_12; + rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha + || pipe_param->src.source_format == dm_420_12; - rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha) ? 1 : 0; + rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 
1 : 0; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0); - if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) { + if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha); } // calculate how to split the det buffer space between luma and chroma - handle_det_buf_split(mode_lib, rq_param, pipe_param.src); - print__rq_params_st(mode_lib, *rq_param); + handle_det_buf_split(mode_lib, rq_param, &pipe_param->src); + print__rq_params_st(mode_lib, rq_param); } -void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st pipe_param) +void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param); - extract_rq_regs(mode_lib, rq_regs, rq_param); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } static void calculate_ttu_cursor( @@ -943,8 +943,8 @@ static void dml_rq_dlg_get_dlg_params( const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en, const bool vm_en, @@ -1112,13 +1112,13 @@ static void dml_rq_dlg_get_dlg_params( vratio_c = scl->vscl_ratio_c; scl_enable = scl->scl_enable; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1239,16 +1239,16 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param->rq_c.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - 
dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1669,15 +1669,15 @@ static void dml_rq_dlg_get_dlg_params( disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml31_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1699,12 +1699,12 @@ void dml31_rq_dlg_get_dlg_reg( dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, e2e_pipe_param, num_pipes); dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); + dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); dml_rq_dlg_get_dlg_params( mode_lib, e2e_pipe_param, @@ -1712,8 +1712,8 @@ void dml31_rq_dlg_get_dlg_reg( pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en, vm_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h index adf8518f761f..8ee991351699 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h @@ -41,7 +41,7 @@ struct display_mode_lib; // See also: <display_rq_regs_st> void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg // Calculate and return DLG and TTU register struct given the system setting @@ -57,7 +57,7 @@ void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, void dml31_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 1051ca1a23b8..edb9f7567d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -80,11 +80,11 @@ enum dm_swizzle_mode { dm_sw_SPARE_13 = 24, dm_sw_64kb_s_x = 25, dm_sw_64kb_d_x = 26, - dm_sw_SPARE_14 = 27, + dm_sw_64kb_r_x = 27, dm_sw_SPARE_15 = 28, dm_sw_var_s_x = 29, dm_sw_var_d_x = 30, - dm_sw_64kb_r_x, + dm_sw_var_r_x = 31, dm_sw_gfx7_2d_thin_l_vp, dm_sw_gfx7_2d_thin_gl, }; 
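The display_mode_enums.h hunk is a different kind of fix. In the old list dm_sw_64kb_r_x had no explicit value, so it sat after dm_sw_var_d_x = 30 and implicitly encoded as 31, while slot 27 was a spare; the patch pins dm_sw_64kb_r_x = 27 and names the old slot-31 value dm_sw_var_r_x, so the enumerators match their intended encodings regardless of list order. A small sketch of why implicit enumerator values are fragile, using hypothetical MODE_*/XMODE_* names:

#include <assert.h>

/* Without explicit values, each enumerator is previous + 1, so its
 * encoding silently depends on its position in the list. */
enum implicit_mode {
	MODE_A = 29,
	MODE_B,		/* 30 */
	MODE_C,		/* 31 -- shifts if anything is inserted above it */
};

/* Pinning every hardware-facing value, as the hunk does for
 * dm_sw_64kb_r_x = 27 and dm_sw_var_r_x = 31, keeps the encoding
 * stable even when the list is reordered or a spare slot is reused. */
enum explicit_mode {
	XMODE_A = 29,
	XMODE_B = 30,
	XMODE_C = 31,
};

int main(void)
{
	assert(MODE_C == 31);
	assert(XMODE_C == 31);
	return 0;
}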
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index d42a0aeca6be..72b1957022aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -49,7 +49,7 @@ struct dml_funcs { struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -60,7 +60,7 @@ struct dml_funcs { void (*rq_dlg_get_rq_reg)( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); void (*recalculate)(struct display_mode_lib *mode_lib); void (*validate)(struct display_mode_lib *mode_lib); }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c index e2d82aacd3bc..71ea503cb32f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c @@ -26,371 +26,371 @@ #include "display_rq_dlg_helpers.h" #include "dml_logger.h" -void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param) +void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param) { dml_print("DML_RQ_DLG_CALC: ***************************\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n"); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l); + print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n"); - print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_c); + print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_c); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l); + print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c); + print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_c); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l); + print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c); + print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_c); dml_print("DML_RQ_DLG_CALC: ***************************\n"); } -void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing) +void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n"); - dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes); - dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes); - dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes); + dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes); + dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing->min_chunk_bytes); + 
dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing->meta_chunk_bytes); dml_print( "DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n", - rq_sizing.min_meta_chunk_bytes); - dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes); - dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes); + rq_sizing->min_meta_chunk_bytes); + dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing->mpte_group_bytes); + dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing->dpte_group_bytes); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param) +void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n"); dml_print( "DML_RQ_DLG_CALC: swath_width_ub = %0d\n", - rq_dlg_param.swath_width_ub); + rq_dlg_param->swath_width_ub); dml_print( "DML_RQ_DLG_CALC: swath_height = %0d\n", - rq_dlg_param.swath_height); + rq_dlg_param->swath_height); dml_print( "DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n", - rq_dlg_param.req_per_swath_ub); + rq_dlg_param->req_per_swath_ub); dml_print( "DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n", - rq_dlg_param.meta_pte_bytes_per_frame_ub); + rq_dlg_param->meta_pte_bytes_per_frame_ub); dml_print( "DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n", - rq_dlg_param.dpte_req_per_row_ub); + rq_dlg_param->dpte_req_per_row_ub); dml_print( "DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n", - rq_dlg_param.dpte_groups_per_row_ub); + rq_dlg_param->dpte_groups_per_row_ub); dml_print( "DML_RQ_DLG_CALC: dpte_row_height = %0d\n", - rq_dlg_param.dpte_row_height); + rq_dlg_param->dpte_row_height); dml_print( "DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n", - rq_dlg_param.dpte_bytes_per_row_ub); + rq_dlg_param->dpte_bytes_per_row_ub); dml_print( "DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n", - rq_dlg_param.meta_chunks_per_row_ub); + rq_dlg_param->meta_chunks_per_row_ub); dml_print( "DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n", - rq_dlg_param.meta_req_per_row_ub); + rq_dlg_param->meta_req_per_row_ub); dml_print( "DML_RQ_DLG_CALC: meta_row_height = %0d\n", - rq_dlg_param.meta_row_height); + rq_dlg_param->meta_row_height); dml_print( "DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n", - rq_dlg_param.meta_bytes_per_row_ub); + rq_dlg_param->meta_bytes_per_row_ub); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param) +void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n"); dml_print( "DML_RQ_DLG_CALC: full_swath_bytes = %0d\n", - rq_misc_param.full_swath_bytes); + rq_misc_param->full_swath_bytes); dml_print( "DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n", - rq_misc_param.stored_swath_bytes); - dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width); - dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height); - dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width); - 
dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height); + rq_misc_param->stored_swath_bytes); + dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param->blk256_width); + dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param->blk256_height); + dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param->req_width); + dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param->req_height); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param) +void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n"); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l); + print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c); + print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param) +void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n"); - dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us); - dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us); - dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us); - dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us); + dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us); + dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us); + dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us); + dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us); dml_print( "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n", - dlg_sys_param.t_srx_delay_us); + dlg_sys_param->t_srx_delay_us); dml_print( "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n", - dlg_sys_param.deepsleep_dcfclk_mhz); + dlg_sys_param->deepsleep_dcfclk_mhz); dml_print( "DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n", - dlg_sys_param.total_flip_bw); + dlg_sys_param->total_flip_bw); dml_print( "DML_RQ_DLG_CALC: total_flip_bytes = %i\n", - dlg_sys_param.total_flip_bytes); + dlg_sys_param->total_flip_bytes); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs) +void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n"); - dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size); - dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size); - dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size); + dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size); + 
dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs->min_chunk_size); + dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs->meta_chunk_size); dml_print( "DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n", - rq_regs.min_meta_chunk_size); - dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size); - dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size); - dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height); + rq_regs->min_meta_chunk_size); + dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs->dpte_group_size); + dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs->mpte_group_size); + dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs->swath_height); dml_print( "DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n", - rq_regs.pte_row_height_linear); + rq_regs->pte_row_height_linear); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs) +void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n"); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l); + print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c); - dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs.crq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address); + print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_c); + dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs) +void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n"); dml_print( "DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n", - dlg_regs.refcyc_h_blank_end); + dlg_regs->refcyc_h_blank_end); dml_print( "DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n", - dlg_regs.dlg_vblank_end); + dlg_regs->dlg_vblank_end); dml_print( "DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n", - dlg_regs.min_dst_y_next_start); + dlg_regs->min_dst_y_next_start); dml_print( "DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n", - dlg_regs.refcyc_per_htotal); + dlg_regs->refcyc_per_htotal); dml_print( "DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n", - dlg_regs.refcyc_x_after_scaler); + 
dlg_regs->refcyc_x_after_scaler); dml_print( "DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n", - dlg_regs.dst_y_after_scaler); + dlg_regs->dst_y_after_scaler); dml_print( "DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n", - dlg_regs.dst_y_prefetch); + dlg_regs->dst_y_prefetch); dml_print( "DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n", - dlg_regs.dst_y_per_vm_vblank); + dlg_regs->dst_y_per_vm_vblank); dml_print( "DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n", - dlg_regs.dst_y_per_row_vblank); + dlg_regs->dst_y_per_row_vblank); dml_print( "DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n", - dlg_regs.dst_y_per_vm_flip); + dlg_regs->dst_y_per_vm_flip); dml_print( "DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n", - dlg_regs.dst_y_per_row_flip); + dlg_regs->dst_y_per_row_flip); dml_print( "DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n", - dlg_regs.ref_freq_to_pix_freq); + dlg_regs->ref_freq_to_pix_freq); dml_print( "DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n", - dlg_regs.vratio_prefetch); + dlg_regs->vratio_prefetch); dml_print( "DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n", - dlg_regs.vratio_prefetch_c); + dlg_regs->vratio_prefetch_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_vblank_l); + dlg_regs->refcyc_per_pte_group_vblank_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_vblank_c); + dlg_regs->refcyc_per_pte_group_vblank_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_vblank_l); + dlg_regs->refcyc_per_meta_chunk_vblank_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_vblank_c); + dlg_regs->refcyc_per_meta_chunk_vblank_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_flip_l); + dlg_regs->refcyc_per_pte_group_flip_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_flip_c); + dlg_regs->refcyc_per_pte_group_flip_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_flip_l); + dlg_regs->refcyc_per_meta_chunk_flip_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_flip_c); + dlg_regs->refcyc_per_meta_chunk_flip_c); dml_print( "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n", - dlg_regs.dst_y_per_pte_row_nom_l); + dlg_regs->dst_y_per_pte_row_nom_l); dml_print( "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n", - dlg_regs.dst_y_per_pte_row_nom_c); + dlg_regs->dst_y_per_pte_row_nom_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_nom_l); + dlg_regs->refcyc_per_pte_group_nom_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_nom_c); + dlg_regs->refcyc_per_pte_group_nom_c); dml_print( "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n", - dlg_regs.dst_y_per_meta_row_nom_l); + dlg_regs->dst_y_per_meta_row_nom_l); dml_print( "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n", - dlg_regs.dst_y_per_meta_row_nom_c); + dlg_regs->dst_y_per_meta_row_nom_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_nom_l); + dlg_regs->refcyc_per_meta_chunk_nom_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_nom_c); + 
dlg_regs->refcyc_per_meta_chunk_nom_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_pre_l); + dlg_regs->refcyc_per_line_delivery_pre_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_pre_c); + dlg_regs->refcyc_per_line_delivery_pre_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_l); + dlg_regs->refcyc_per_line_delivery_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_c); + dlg_regs->refcyc_per_line_delivery_c); dml_print( "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n", - dlg_regs.chunk_hdl_adjust_cur0); + dlg_regs->chunk_hdl_adjust_cur0); dml_print( "DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n", - dlg_regs.dst_y_offset_cur1); + dlg_regs->dst_y_offset_cur1); dml_print( "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n", - dlg_regs.chunk_hdl_adjust_cur1); + dlg_regs->chunk_hdl_adjust_cur1); dml_print( "DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n", - dlg_regs.vready_after_vcount0); + dlg_regs->vready_after_vcount0); dml_print( "DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n", - dlg_regs.dst_y_delta_drq_limit); + dlg_regs->dst_y_delta_drq_limit); dml_print( "DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n", - dlg_regs.xfc_reg_transfer_delay); + dlg_regs->xfc_reg_transfer_delay); dml_print( "DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n", - dlg_regs.xfc_reg_precharge_delay); + dlg_regs->xfc_reg_precharge_delay); dml_print( "DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n", - dlg_regs.xfc_reg_remote_surface_flip_latency); + dlg_regs->xfc_reg_remote_surface_flip_latency); dml_print( "DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n", - dlg_regs.refcyc_per_vm_dmdata); + dlg_regs->refcyc_per_vm_dmdata); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs) +void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n"); dml_print( "DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n", - ttu_regs.qos_level_low_wm); + ttu_regs->qos_level_low_wm); dml_print( "DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n", - ttu_regs.qos_level_high_wm); + ttu_regs->qos_level_high_wm); dml_print( "DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n", - ttu_regs.min_ttu_vblank); + ttu_regs->min_ttu_vblank); dml_print( "DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n", - ttu_regs.qos_level_flip); + ttu_regs->qos_level_flip); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_l); + ttu_regs->refcyc_per_req_delivery_pre_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_l); + ttu_regs->refcyc_per_req_delivery_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_c); + ttu_regs->refcyc_per_req_delivery_pre_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_c); + ttu_regs->refcyc_per_req_delivery_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_cur0); + ttu_regs->refcyc_per_req_delivery_cur0); dml_print( 
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_cur0); + ttu_regs->refcyc_per_req_delivery_pre_cur0); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_cur1); + ttu_regs->refcyc_per_req_delivery_cur1); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_cur1); + ttu_regs->refcyc_per_req_delivery_pre_cur1); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n", - ttu_regs.qos_level_fixed_l); + ttu_regs->qos_level_fixed_l); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n", - ttu_regs.qos_ramp_disable_l); + ttu_regs->qos_ramp_disable_l); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n", - ttu_regs.qos_level_fixed_c); + ttu_regs->qos_level_fixed_c); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n", - ttu_regs.qos_ramp_disable_c); + ttu_regs->qos_ramp_disable_c); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n", - ttu_regs.qos_level_fixed_cur0); + ttu_regs->qos_level_fixed_cur0); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n", - ttu_regs.qos_ramp_disable_cur0); + ttu_regs->qos_ramp_disable_cur0); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n", - ttu_regs.qos_level_fixed_cur1); + ttu_regs->qos_level_fixed_cur1); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n", - ttu_regs.qos_ramp_disable_cur1); + ttu_regs->qos_ramp_disable_cur1); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h index 2555ef0358c2..ebcd717744e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h @@ -31,16 +31,16 @@ /* Function: Printer functions * Print various struct */ -void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param); -void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing); -void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param); -void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param); -void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param); -void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param); +void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param); +void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing); +void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param); +void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param); +void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param); +void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param); -void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs); -void print__rq_regs_st(struct 
display_mode_lib *mode_lib, display_rq_regs_st rq_regs); -void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs); -void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs); +void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs); +void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs); +void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs); +void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index 8f2b1684c231..59dc2c5b58dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -206,47 +206,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_data_rq_regs_st *rq_regs, - const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing) + const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing) { DTRACE("DLG: %s: rq_sizing param", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } void dml1_extract_rq_regs( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_regs_st *rq_regs, - const struct _vcs_dpi_display_rq_params_st rq_param) + const struct _vcs_dpi_display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); - if (rq_param.yuv420) - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); + if (rq_param->yuv420) + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + 
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); /* TODO: take the max between luma, chroma chunk size? * okay for now, as we are setting chunk_bytes to 8kb anyways */ - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -255,9 +255,9 @@ void dml1_extract_rq_regs( rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */ } else { detile_buf_plane1_addr = dml_round_to_multiple( @@ -272,7 +272,7 @@ void dml1_extract_rq_regs( static void handle_det_buf_split( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_params_st *rq_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param) + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -281,8 +281,8 @@ static void handle_det_buf_split( unsigned int full_swath_bytes_packed_c = 0; bool req128_l = 0; bool req128_c = 0; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -556,7 +556,7 @@ static void get_surf_rq_param( struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param, struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param, struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param, + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param, bool is_chroma) { bool mode_422 = 0; @@ -622,15 +622,15 @@ static void get_surf_rq_param( /* TODO check if ppe apply for both luma and chroma in 422 case */ if (is_chroma) { - vp_width = pipe_src_param.viewport_width_c / ppe; - vp_height = pipe_src_param.viewport_height_c; - data_pitch = pipe_src_param.data_pitch_c; - meta_pitch = pipe_src_param.meta_pitch_c; + vp_width = pipe_src_param->viewport_width_c / ppe; + vp_height = pipe_src_param->viewport_height_c; + data_pitch = pipe_src_param->data_pitch_c; + meta_pitch = pipe_src_param->meta_pitch_c; } else { - vp_width = pipe_src_param.viewport_width / ppe; - vp_height = pipe_src_param.viewport_height; - data_pitch = pipe_src_param.data_pitch; - meta_pitch = pipe_src_param.meta_pitch; + vp_width = pipe_src_param->viewport_width / ppe; + vp_height = pipe_src_param->viewport_height; + data_pitch = pipe_src_param->data_pitch; + meta_pitch = pipe_src_param->meta_pitch; } rq_sizing_param->chunk_bytes = 8192; @@ -645,11 +645,11 @@ static void get_surf_rq_param( rq_sizing_param->mpte_group_bytes = 2048; - surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - surf_vert = (pipe_src_param.source_scan == dm_vert); + surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + surf_vert = (pipe_src_param->source_scan == dm_vert); bytes_per_element = 
get_bytes_per_element( - (enum source_format_class) pipe_src_param.source_format, + (enum source_format_class) pipe_src_param->source_format, is_chroma); log2_bytes_per_element = dml_log2(bytes_per_element); blk256_width = 0; @@ -671,7 +671,7 @@ static void get_surf_rq_param( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes( - (enum source_macro_tile_size) pipe_src_param.macro_tile_size); + (enum source_macro_tile_size) pipe_src_param->macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); log2_blk_height = 0; log2_blk_width = 0; @@ -682,7 +682,7 @@ static void get_surf_rq_param( * "/2" is like square root * blk is vertical biased */ - if (pipe_src_param.sw_mode != dm_sw_linear) + if (pipe_src_param->sw_mode != dm_sw_linear) log2_blk_height = log2_blk256_height + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1); else @@ -930,10 +930,10 @@ static void get_surf_rq_param( &func_meta_row_height, vp_width, data_pitch, - pipe_src_param.source_format, - pipe_src_param.sw_mode, - pipe_src_param.macro_tile_size, - pipe_src_param.source_scan, + pipe_src_param->source_format, + pipe_src_param->sw_mode, + pipe_src_param->macro_tile_size, + pipe_src_param->source_scan, is_chroma); /* Just a check to make sure this function and the new one give the same @@ -960,12 +960,12 @@ static void get_surf_rq_param( void dml1_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_params_st *rq_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param) + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param) { /* get param for luma surface */ - rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 - || pipe_src_param.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + rq_param->yuv420 = pipe_src_param->source_format == dm_420_8 + || pipe_src_param->source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10; get_surf_rq_param( mode_lib, @@ -975,7 +975,7 @@ void dml1_rq_dlg_get_rq_params( pipe_src_param, 0); - if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) { + if (is_dual_plane((enum source_format_class) pipe_src_param->source_format)) { /* get param for chroma surface */ get_surf_rq_param( mode_lib, @@ -988,7 +988,7 @@ void dml1_rq_dlg_get_rq_params( /* calculate how to split the det buffer space between luma and chroma */ handle_det_buf_split(mode_lib, rq_param, pipe_src_param); - print__rq_params_st(mode_lib, *rq_param); + print__rq_params_st(mode_lib, rq_param); } /* Note: currently taken in as is. 
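The dml1 conversions above all follow one pattern: every large _vcs_dpi_* parameter struct that used to be copied by value is now passed as a const pointer, with '.' member accesses rewritten to '->'. A minimal sketch of the trade-off, using a hypothetical stand-in struct (name and size are illustrative, not from the patch):

struct big_params {
	unsigned int fields[512];	/* ~2 KiB, the size class of the larger DML structs */
};

/* Old style: every call copies the whole struct onto the callee's stack frame. */
static unsigned int sum_by_value(const struct big_params p)
{
	return p.fields[0] + p.fields[511];
}

/* New style: only a pointer crosses the call boundary. No copy, far less
 * stack pressure (kernel stacks are only 8-16 KiB), and const still
 * guarantees the callee leaves the caller's data untouched. */
static unsigned int sum_by_pointer(const struct big_params *p)
{
	return p->fields[0] + p->fields[511];
}

For call chains like dml1_rq_dlg_get_rq_params() -> get_surf_rq_param() this removes one struct copy per level, which is presumably the motivation for the conversion.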
@@ -998,26 +998,26 @@ void dml1_rq_dlg_get_dlg_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs, - const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param, - const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param, - const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param, + const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param, + const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param, + const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param, const bool cstate_en, const bool pstate_en, const bool vm_en, const bool iflip_en) { /* Timing */ - unsigned int htotal = e2e_pipe_param.pipe.dest.htotal; - unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end; - unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start; - unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end; - bool interlaced = e2e_pipe_param.pipe.dest.interlaced; + unsigned int htotal = e2e_pipe_param->pipe.dest.htotal; + unsigned int hblank_end = e2e_pipe_param->pipe.dest.hblank_end; + unsigned int vblank_start = e2e_pipe_param->pipe.dest.vblank_start; + unsigned int vblank_end = e2e_pipe_param->pipe.dest.vblank_end; + bool interlaced = e2e_pipe_param->pipe.dest.interlaced; unsigned int min_vblank = mode_lib->ip.min_vblank_lines; - double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz; - double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz; - double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz; - double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz; + double pclk_freq_in_mhz = e2e_pipe_param->pipe.dest.pixel_rate_mhz; + double refclk_freq_in_mhz = e2e_pipe_param->clks_cfg.refclk_mhz; + double dppclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dppclk_mhz; + double dispclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dispclk_mhz; double ref_freq_to_pix_freq; double prefetch_xy_calc_in_dcfclk; @@ -1160,13 +1160,13 @@ void dml1_rq_dlg_get_dlg_params( disp_dlg_regs->dlg_vblank_end = interlaced ? 
(vblank_end / 2) : vblank_end; /* 15 bits */ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */ - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz; - min_ttu_vblank = dlg_sys_param.t_urg_wm_us; + min_ttu_vblank = dlg_sys_param->t_urg_wm_us; if (cstate_en) - min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank); + min_ttu_vblank = dml_max(dlg_sys_param->t_sr_wm_us, min_ttu_vblank); if (pstate_en) - min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank); + min_ttu_vblank = dml_max(dlg_sys_param->t_mclk_wm_us, min_ttu_vblank); min_ttu_vblank = min_ttu_vblank + t_calc_us; min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal; @@ -1197,59 +1197,59 @@ void dml1_rq_dlg_get_dlg_params( /* ------------------------- */ /* Prefetch Calc */ /* Source */ - dcc_en = e2e_pipe_param.pipe.src.dcc; + dcc_en = e2e_pipe_param->pipe.src.dcc; dual_plane = is_dual_plane( - (enum source_format_class) e2e_pipe_param.pipe.src.source_format); + (enum source_format_class) e2e_pipe_param->pipe.src.source_format); mode_422 = 0; /* TODO */ - access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ + access_dir = (e2e_pipe_param->pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ bytes_per_element_l = get_bytes_per_element( - (enum source_format_class) e2e_pipe_param.pipe.src.source_format, + (enum source_format_class) e2e_pipe_param->pipe.src.source_format, 0); bytes_per_element_c = get_bytes_per_element( - (enum source_format_class) e2e_pipe_param.pipe.src.source_format, + (enum source_format_class) e2e_pipe_param->pipe.src.source_format, 1); - vp_height_l = e2e_pipe_param.pipe.src.viewport_height; - vp_width_l = e2e_pipe_param.pipe.src.viewport_width; - vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c; - vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c; + vp_height_l = e2e_pipe_param->pipe.src.viewport_height; + vp_width_l = e2e_pipe_param->pipe.src.viewport_width; + vp_height_c = e2e_pipe_param->pipe.src.viewport_height_c; + vp_width_c = e2e_pipe_param->pipe.src.viewport_width_c; /* Scaling */ - htaps_l = e2e_pipe_param.pipe.scale_taps.htaps; - htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c; - hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio; - hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c; - vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio; - vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c; + htaps_l = e2e_pipe_param->pipe.scale_taps.htaps; + htaps_c = e2e_pipe_param->pipe.scale_taps.htaps_c; + hratios_l = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio; + hratios_c = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio_c; + vratio_l = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio; + vratio_c = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio_c; line_time_in_us = (htotal / pclk_freq_in_mhz); - vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit; - vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c; - vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot; - vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c; - - swath_height_l = rq_dlg_param.rq_l.swath_height; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; - dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_l = 
rq_dlg_param.rq_l.dpte_groups_per_row_ub; - meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; - meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; - - swath_height_c = rq_dlg_param.rq_c.swath_height; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; - - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset; - vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width; - vready_offset = e2e_pipe_param.pipe.dest.vready_offset; + vinit_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit; + vinit_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_c; + vinit_bot_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot; + vinit_bot_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot_c; + + swath_height_l = rq_dlg_param->rq_l.swath_height; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; + dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; + meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub; + meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub; + + swath_height_c = rq_dlg_param->rq_c.swath_height; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; + + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + vupdate_offset = e2e_pipe_param->pipe.dest.vupdate_offset; + vupdate_width = e2e_pipe_param->pipe.dest.vupdate_width; + vready_offset = e2e_pipe_param->pipe.dest.vready_offset; dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal; dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal; pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz; - vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start; + vstartup_start = e2e_pipe_param->pipe.dest.vstartup_start; if (interlaced) vstartup_start = vstartup_start / 2; @@ -1276,13 +1276,13 @@ void dml1_rq_dlg_get_dlg_params( dst_x_after_scaler = 0; dst_y_after_scaler = 0; - if (e2e_pipe_param.pipe.src.is_hsplit) + if (e2e_pipe_param->pipe.src.is_hsplit) dst_x_after_scaler = pixel_rate_delay_subtotal - + e2e_pipe_param.pipe.dest.recout_width; + + e2e_pipe_param->pipe.dest.recout_width; else dst_x_after_scaler = pixel_rate_delay_subtotal; - if (e2e_pipe_param.dout.output_format == dm_420) + if (e2e_pipe_param->dout.output_format == dm_420) dst_y_after_scaler = 1; else dst_y_after_scaler = 0; @@ -1334,7 +1334,7 @@ void dml1_rq_dlg_get_dlg_params( DTRACE( "DLG: %s: t_srx_delay_us = %3.2f", __func__, - (double) dlg_sys_param.t_srx_delay_us); + (double) dlg_sys_param->t_srx_delay_us); DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us); DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset); DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width); @@ -1408,12 +1408,12 @@ void dml1_rq_dlg_get_dlg_params( DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes); prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us; - flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw) - / (double) 
dlg_sys_param.total_flip_bytes; + flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param->total_flip_bw) + / (double) dlg_sys_param->total_flip_bytes; t_vm_us = line_time_in_us / 4.0; if (vm_en && dcc_en) { t_vm_us = dml_max( - dlg_sys_param.t_extra_us, + dlg_sys_param->t_extra_us, dml_max((double) vm_bytes / prefetch_bw, t_vm_us)); if (iflip_en && !dual_plane) { @@ -1423,12 +1423,12 @@ void dml1_rq_dlg_get_dlg_params( } } - t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us); + t_r0_us = dml_max(dlg_sys_param->t_extra_us - t_vm_us, line_time_in_us - t_vm_us); if (vm_en || dcc_en) { t_r0_us = dml_max( (double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw, - dlg_sys_param.t_extra_us); + dlg_sys_param->t_extra_us); t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us); if (iflip_en && !dual_plane) { @@ -1550,15 +1550,15 @@ void dml1_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */ /* Active */ - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l / (double) vratio_l * dml_pow(2, 2)); @@ -1650,14 +1650,14 @@ void dml1_rq_dlg_get_dlg_params( refcyc_per_req_delivery_cur0 = 0.; full_recout_width = 0; - if (e2e_pipe_param.pipe.src.is_hsplit) { - if (e2e_pipe_param.pipe.dest.full_recout_width == 0) { + if (e2e_pipe_param->pipe.src.is_hsplit) { + if (e2e_pipe_param->pipe.dest.full_recout_width == 0) { DTRACE("DLG: %s: Warning: full_recout_width not set in hsplit mode", __func__); - full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */ + full_recout_width = e2e_pipe_param->pipe.dest.recout_width * 2; /* assume half split for dcn1 */ } else - full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width; + full_recout_width = e2e_pipe_param->pipe.dest.full_recout_width; } else - full_recout_width = e2e_pipe_param.pipe.dest.recout_width; + full_recout_width = e2e_pipe_param->pipe.dest.recout_width; refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery( mode_lib, @@ -1824,9 +1824,9 @@ void dml1_rq_dlg_get_dlg_params( } /* TTU - Cursor */ - hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio; - cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */ - cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp; + hratios_cur0 = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio; + cur0_src_width = e2e_pipe_param->pipe.src.cur0_src_width; /* cursor source width */ + cur0_bpp = (enum cursor_bpp) e2e_pipe_param->pipe.src.cur0_bpp; cur0_req_size = 0; cur0_req_width = 0; cur0_width_ub = 0.0; @@ -1927,6 +1927,6 @@ void dml1_rq_dlg_get_dlg_params( disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < 
dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h index 9c06913ad767..e19ee3bde45f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h @@ -33,7 +33,7 @@ struct display_mode_lib; void dml1_extract_rq_regs( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_regs_st *rq_regs, - const struct _vcs_dpi_display_rq_params_st rq_param); + const struct _vcs_dpi_display_rq_params_st *rq_param); /* Function: dml_rq_dlg_get_rq_params * Calculate requestor related parameters that register definition agnostic * (i.e. this layer does try to separate real values from register definition) @@ -45,7 +45,7 @@ void dml1_extract_rq_regs( void dml1_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_params_st *rq_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param); + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param); /* Function: dml_rq_dlg_get_dlg_params @@ -55,9 +55,9 @@ void dml1_rq_dlg_get_dlg_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_dlg_regs_st *dlg_regs, struct _vcs_dpi_display_ttu_regs_st *ttu_regs, - const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param, - const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param, - const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param, + const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param, + const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param, + const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param, const bool cstate_en, const bool pstate_en, const bool vm_en, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index f5b7da0e64c0..0321b4446e05 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -40,8 +40,15 @@ static bool dsc_policy_enable_dsc_when_not_needed; static bool dsc_policy_disable_dsc_stream_overhead; +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? 
(X) : (Y)) +#endif + /* Forward Declarations */ -static void get_dsc_bandwidth_range( +static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, const uint32_t num_slices_h, @@ -76,11 +83,6 @@ static bool setup_dsc_config( int max_dsc_target_bpp_limit_override_x16, struct dc_dsc_config *dsc_cfg); -static struct fixed31_32 compute_dsc_max_bandwidth_overhead( - const struct dc_crtc_timing *timing, - const int num_slices_h, - const bool is_dp); - static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -361,7 +363,7 @@ bool dc_dsc_compute_bandwidth_range( dsc_min_slice_height_override, max_bpp_x16, &config); if (is_dsc_possible) - get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, + is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, config.num_slices_h, &dsc_common_caps, timing, range); return is_dsc_possible; @@ -462,32 +464,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value) return (value + 9) / 10; } -static struct fixed31_32 compute_dsc_max_bandwidth_overhead( - const struct dc_crtc_timing *timing, - const int num_slices_h, - const bool is_dp) -{ - struct fixed31_32 max_dsc_overhead; - struct fixed31_32 refresh_rate; - - if (dsc_policy_disable_dsc_stream_overhead || !is_dp) - return dc_fixpt_from_int(0); - - /* use target bpp that can take entire target bandwidth */ - refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz); - refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total); - refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total); - refresh_rate = dc_fixpt_mul_int(refresh_rate, 100); - - max_dsc_overhead = dc_fixpt_from_int(num_slices_h); - max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total); - max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256); - max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000); - max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate); - - return max_dsc_overhead; -} - static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bandwidth_in_kbps, const struct dc_crtc_timing *timing, @@ -495,14 +471,14 @@ static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bpp_increment_div, const bool is_dp) { - struct fixed31_32 overhead_in_kbps; + uint32_t overhead_in_kbps; struct fixed31_32 effective_bandwidth_in_kbps; struct fixed31_32 bpp_x16; - overhead_in_kbps = compute_dsc_max_bandwidth_overhead( + overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps( timing, num_slices_h, is_dp); effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps); - effective_bandwidth_in_kbps = dc_fixpt_sub(effective_bandwidth_in_kbps, + effective_bandwidth_in_kbps = dc_fixpt_sub_int(effective_bandwidth_in_kbps, overhead_in_kbps); bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10); bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz); @@ -512,10 +488,12 @@ static uint32_t compute_bpp_x16_from_target_bandwidth( return dc_fixpt_floor(bpp_x16); } -/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock - * and uncompressed bandwidth. +/* Decide DSC bandwidth range based on signal, timing, spec-specific and input min and max + * requirements. + * The range output includes decided min/max target bpp, the respective bandwidth requirements + * and the native timing bandwidth requirement when DSC is not used. 
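Numerically, compute_bpp_x16_from_target_bandwidth() above is a unit conversion: kbps carries a factor of 1000 bits/s while pix_clk_100hz carries a factor of 100 pixels/s, so the mul_int(..., 10)/div_int(..., pix_clk_100hz) pair yields bits per pixel directly. A floating-point paraphrase, skipping the bpp-increment rounding steps that the hunk elides (function name and numbers are illustrative, not from the patch):

static unsigned int approx_bpp_x16(unsigned int bandwidth_in_kbps,
				   unsigned int overhead_in_kbps,
				   unsigned int pix_clk_100hz)
{
	/* (kbps * 1000) / (pix_clk_100hz * 100) == kbps * 10 / pix_clk_100hz */
	double effective_kbps = (double)bandwidth_in_kbps - overhead_in_kbps;
	double bpp = effective_kbps * 10.0 / pix_clk_100hz;

	return (unsigned int)(bpp * 16.0);	/* x16 fixed-point, truncated */
}

For example, a 594 MHz pixel clock (pix_clk_100hz = 5940000) with a 10 Gb/s effective budget works out to about 16.8 bpp, i.e. a bpp_x16 of roughly 269.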
*/ -static void get_dsc_bandwidth_range( +static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, const uint32_t num_slices_h, @@ -523,39 +501,45 @@ static void get_dsc_bandwidth_range( const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range) { - /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); - - /* max dsc target bpp */ - range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, - max_bpp_x16, num_slices_h, dsc_caps->is_dp); - range->max_target_bpp_x16 = max_bpp_x16; - if (range->max_kbps > range->stream_kbps) { - /* max dsc target bpp is capped to native bandwidth */ - range->max_kbps = range->stream_kbps; - range->max_target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( - range->max_kbps, timing, num_slices_h, - dsc_caps->bpp_increment_div, - dsc_caps->is_dp); + uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; + + memset(range, 0, sizeof(*range)); + + /* apply signal, timing, specs and explicitly specified DSC range requirements */ + if (preferred_bpp_x16) { + if (preferred_bpp_x16 <= max_bpp_x16 && + preferred_bpp_x16 >= min_bpp_x16) { + range->max_target_bpp_x16 = preferred_bpp_x16; + range->min_target_bpp_x16 = preferred_bpp_x16; + } } + else { + range->max_target_bpp_x16 = max_bpp_x16; + range->min_target_bpp_x16 = min_bpp_x16; + } + + /* populate output structure */ + if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) { + /* native stream bandwidth */ + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + + /* max dsc target bpp */ + range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, + range->max_target_bpp_x16, num_slices_h, dsc_caps->is_dp); - /* min dsc target bpp */ - range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, - min_bpp_x16, num_slices_h, dsc_caps->is_dp); - range->min_target_bpp_x16 = min_bpp_x16; - if (range->min_kbps > range->max_kbps) { - /* min dsc target bpp is capped to max dsc bandwidth*/ - range->min_kbps = range->max_kbps; - range->min_target_bpp_x16 = range->max_target_bpp_x16; + /* min dsc target bpp */ + range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, + range->min_target_bpp_x16, num_slices_h, dsc_caps->is_dp); } + + return range->max_kbps >= range->min_kbps && range->min_kbps > 0; } /* Decides if DSC should be used and calculates target bpp if it should, applying DSC policy. * * Returns: - * - 'true' if DSC was required by policy and was successfully applied - * - 'false' if DSC was not necessary (e.g. if uncompressed stream fits 'target_bandwidth_kbps'), - * or if it couldn't be applied based on DSC policy. + * - 'true' if target bpp is decided + * - 'false' if target bpp cannot be decided (e.g. 
cannot fit even with min DSC bpp). */ static bool decide_dsc_target_bpp_x16( const struct dc_dsc_policy *policy, @@ -565,40 +549,29 @@ const int num_slices_h, int *target_bpp_x16) { - bool should_use_dsc = false; struct dc_dsc_bw_range range; - memset(&range, 0, sizeof(range)); - - get_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - num_slices_h, dsc_common_caps, timing, &range); - if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) { - /* enough bandwidth without dsc */ - *target_bpp_x16 = 0; - should_use_dsc = false; - } else if (policy->preferred_bpp_x16 > 0 && - policy->preferred_bpp_x16 <= range.max_target_bpp_x16 && - policy->preferred_bpp_x16 >= range.min_target_bpp_x16) { - *target_bpp_x16 = policy->preferred_bpp_x16; - should_use_dsc = true; - } else if (target_bandwidth_kbps >= range.max_kbps) { - /* use max target bpp allowed */ - *target_bpp_x16 = range.max_target_bpp_x16; - should_use_dsc = true; - } else if (target_bandwidth_kbps >= range.min_kbps) { - /* use target bpp that can take entire target bandwidth */ - *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( - target_bandwidth_kbps, timing, num_slices_h, - dsc_common_caps->bpp_increment_div, - dsc_common_caps->is_dp); - should_use_dsc = true; - } else { - /* not enough bandwidth to fulfill minimum requirement */ - *target_bpp_x16 = 0; - should_use_dsc = false; + *target_bpp_x16 = 0; + + if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, + num_slices_h, dsc_common_caps, timing, &range)) { + if (target_bandwidth_kbps >= range.stream_kbps) { + if (policy->enable_dsc_when_not_needed) + /* enable max bpp even if dsc is not needed */ + *target_bpp_x16 = range.max_target_bpp_x16; + } else if (target_bandwidth_kbps >= range.max_kbps) { + /* use max target bpp allowed */ + *target_bpp_x16 = range.max_target_bpp_x16; + } else if (target_bandwidth_kbps >= range.min_kbps) { + /* use target bpp that can take entire target bandwidth */ + *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( + target_bandwidth_kbps, timing, num_slices_h, + dsc_common_caps->bpp_increment_div, + dsc_common_caps->is_dp); + } } - return should_use_dsc; + return *target_bpp_x16 != 0; } #define MIN_AVAILABLE_SLICES_SIZE 4 @@ -994,19 +967,45 @@ bool dc_dsc_compute_config( uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp) { - struct fixed31_32 overhead_in_kbps; + uint32_t overhead_in_kbps; struct fixed31_32 bpp; struct fixed31_32 actual_bandwidth_in_kbps; - overhead_in_kbps = compute_dsc_max_bandwidth_overhead( + overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps( timing, num_slices_h, is_dp); bpp = dc_fixpt_from_fraction(bpp_x16, 16); actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10); actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp); - actual_bandwidth_in_kbps = dc_fixpt_add(actual_bandwidth_in_kbps, overhead_in_kbps); + actual_bandwidth_in_kbps = dc_fixpt_add_int(actual_bandwidth_in_kbps, overhead_in_kbps); return dc_fixpt_ceil(actual_bandwidth_in_kbps); } +uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( + const struct dc_crtc_timing *timing, + const int num_slices_h, + const bool is_dp) +{ + struct fixed31_32 max_dsc_overhead; + struct fixed31_32 refresh_rate; + + if (dsc_policy_disable_dsc_stream_overhead || !is_dp) + return 0; + + /* use target bpp that can take entire 
target bandwidth */ + refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz); + refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total); + refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total); + refresh_rate = dc_fixpt_mul_int(refresh_rate, 100); + + max_dsc_overhead = dc_fixpt_from_int(num_slices_h); + max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total); + max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256); + max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000); + max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate); + + return dc_fixpt_ceil(max_dsc_overhead); +} + void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy) @@ -1064,8 +1063,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, return; } - policy->preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; - /* internal upper limit, default 16 bpp */ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) policy->max_target_bpp = dsc_policy_max_target_bpp_limit; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 45a6216dfa2a..0fea258c6db3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -247,6 +247,12 @@ struct resource_pool { unsigned int dig_link_enc_count; #if defined(CONFIG_DRM_AMD_DC_DCN) + unsigned int hpo_dp_stream_enc_count; + struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS]; + unsigned int hpo_dp_link_enc_count; + struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; +#endif +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; #endif @@ -298,6 +304,9 @@ struct stream_resource { struct display_stream_compressor *dsc; struct timing_generator *tg; struct stream_encoder *stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_stream_encoder *hpo_dp_stream_enc; +#endif struct audio *audio; struct pixel_clk_params pix_clk_params; @@ -366,6 +375,9 @@ struct pipe_ctx { struct _vcs_dpi_display_ttu_regs_st ttu_regs; struct _vcs_dpi_display_rq_regs_st rq_regs; struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param; + struct _vcs_dpi_display_rq_params_st dml_rq_param; + struct _vcs_dpi_display_dlg_sys_params_st dml_dlg_sys_param; + struct _vcs_dpi_display_e2e_pipe_params_st dml_input; int det_buffer_size_kb; bool unbounded_req; #endif @@ -375,6 +387,17 @@ struct pipe_ctx { bool vtp_locked; }; +/* Data used for dynamic link encoder assignment. + * Tracks current and future assignments; available link encoders; + * and mode of operation (whether to use current or future assignments). + */ +struct link_enc_cfg_context { + enum link_enc_cfg_mode mode; + struct link_enc_assignment link_enc_assignments[MAX_PIPES]; + enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + struct link_enc_assignment transient_assignments[MAX_PIPES]; +}; + struct resource_context { struct pipe_ctx pipe_ctx[MAX_PIPES]; bool is_stream_enc_acquired[MAX_PIPES * 2]; @@ -382,12 +405,10 @@ struct resource_context { uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES]; uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; - /* A table/array of encoder-to-link assignments. One entry per stream. - * Indexed by stream index in dc_state. 
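The new link_enc_cfg_context above pairs two assignment tables with a mode selector, so encoder queries can be answered either from the steady state or from a state that is mid-commit (the mode values LINK_ENC_CFG_STEADY and LINK_ENC_CFG_TRANSIENT are added in link_encoder.h further down). A plausible lookup helper dispatching on that mode (the helper itself is hypothetical, not part of the patch):

static const struct link_enc_assignment *
get_assignment(const struct link_enc_cfg_context *ctx, int pipe_idx)
{
	/* While a new state is being committed, the transient table is
	 * authoritative; otherwise use the steady-state assignments. */
	if (ctx->mode == LINK_ENC_CFG_TRANSIENT)
		return &ctx->transient_assignments[pipe_idx];

	return &ctx->link_enc_assignments[pipe_idx];
}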
- */ - struct link_enc_assignment link_enc_assignments[MAX_PIPES]; - /* List of available link encoders. Uses engine ID as encoder identifier. */ - enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + struct link_enc_cfg_context link_enc_cfg_ctx; +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; +#endif #if defined(CONFIG_DRM_AMD_DC_DCN) bool is_mpc_3dlut_acquired[MAX_PIPES]; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 01c3a31be191..cb8e785a866e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -30,6 +30,7 @@ #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ #define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ #define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ +#define MAX_MTP_SLOT_COUNT 64 #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #define TRAINING_AUX_RD_INTERVAL 100 //us @@ -165,7 +166,7 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable); +bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable); @@ -189,5 +190,26 @@ enum dc_status dpcd_configure_lttpr_mode( struct link_training_settings *lt_settings); enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); +bool dpcd_write_128b_132b_sst_payload_allocation_table( + const struct dc_stream_state *stream, + struct dc_link *link, + struct link_mst_stream_allocation_table *proposed_table, + bool allocate); + +enum dc_status dpcd_configure_channel_coding( + struct dc_link *link, + struct link_training_settings *lt_settings); + +bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link); + +struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link); +void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings); +void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal); +void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); +bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); +void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link); + bool dp_retrieve_lttpr_cap(struct dc_link *link); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 0afa2364a986..09237d5819f4 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -79,7 +79,30 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); +#if defined(CONFIG_DRM_AMD_DC_DCN) + void (*set_dpstreamclk)( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst); + + void (*enable_symclk32_se)( + struct dccg *dccg, + int hpo_se_inst, + enum phyd32clk_clock_source phyd32clk); + + void (*disable_symclk32_se)( + struct dccg *dccg, + int hpo_se_inst); + + void (*enable_symclk32_le)( + struct dccg *dccg, + int hpo_le_inst, + enum 
phyd32clk_clock_source phyd32clk); + void (*disable_symclk32_le)( + struct dccg *dccg, + int hpo_le_inst); +#endif void (*set_physymclk)( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 31a1713bb49f..10ecbc667ffa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -38,6 +38,10 @@ #define MAX_PIPES 6 #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define MAX_HPO_DP2_ENCODERS 4 +#define MAX_HPO_DP2_LINK_ENCODERS 2 +#endif struct gamma_curve { uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 9eaf345aa2a1..bb0e91756ddd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -59,6 +59,10 @@ struct encoder_feature_support { uint32_t IS_TPS3_CAPABLE:1; uint32_t IS_TPS4_CAPABLE:1; uint32_t HDMI_6GB_EN:1; + uint32_t IS_DP2_CAPABLE:1; + uint32_t IS_UHBR10_CAPABLE:1; + uint32_t IS_UHBR13_5_CAPABLE:1; + uint32_t IS_UHBR20_CAPABLE:1; uint32_t DP_IS_USB_C:1; } bits; uint32_t raw; @@ -208,6 +212,99 @@ struct link_enc_assignment { bool valid; struct display_endpoint_id ep_id; enum engine_id eng_id; + struct dc_stream_state *stream; }; +enum link_enc_cfg_mode { + LINK_ENC_CFG_STEADY, /* Normal operation - use current_state. */ + LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. */ +}; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dp2_link_mode { + DP2_LINK_TRAINING_TPS1, + DP2_LINK_TRAINING_TPS2, + DP2_LINK_ACTIVE, + DP2_TEST_PATTERN +}; + +enum dp2_phy_tp_select { + DP_DPHY_TP_SELECT_TPS1, + DP_DPHY_TP_SELECT_TPS2, + DP_DPHY_TP_SELECT_PRBS, + DP_DPHY_TP_SELECT_CUSTOM, + DP_DPHY_TP_SELECT_SQUARE +}; + +enum dp2_phy_tp_prbs { + DP_DPHY_TP_PRBS7, + DP_DPHY_TP_PRBS9, + DP_DPHY_TP_PRBS11, + DP_DPHY_TP_PRBS15, + DP_DPHY_TP_PRBS23, + DP_DPHY_TP_PRBS31 +}; + +struct hpo_dp_link_enc_state { + uint32_t link_enc_enabled; + uint32_t link_mode; + uint32_t lane_count; + uint32_t slot_count[4]; + uint32_t stream_src[4]; + uint32_t vc_rate_x[4]; + uint32_t vc_rate_y[4]; +}; + +struct hpo_dp_link_encoder { + const struct hpo_dp_link_encoder_funcs *funcs; + struct dc_context *ctx; + int inst; + enum engine_id preferred_engine; + enum transmitter transmitter; + enum hpd_source_id hpd_source; +}; + +struct hpo_dp_link_encoder_funcs { + + void (*enable_link_phy)(struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter); + + void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc, + enum signal_type signal); + + void (*link_enable)( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes); + + void (*link_disable)( + struct hpo_dp_link_encoder *enc); + + void (*set_link_test_pattern)( + struct hpo_dp_link_encoder *enc, + struct encoder_set_dp_phy_pattern_param *tp_params); + + void (*update_stream_allocation_table)( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table); + + void (*set_throttled_vcp_size)( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp); + + bool (*is_in_alt_mode) ( + struct hpo_dp_link_encoder *enc); + + void (*read_state)( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state); + + void (*set_ffe)( + struct 
hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset); +}; +#endif + #endif /* LINK_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 564ea6a727b0..c88e113b94d1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -165,9 +165,11 @@ struct stream_encoder_funcs { struct stream_encoder *enc); void (*dp_blank)( + struct dc_link *link, struct stream_encoder *enc); void (*dp_unblank)( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param); @@ -227,7 +229,8 @@ struct stream_encoder_funcs { void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps); + uint8_t *dsc_packed_pps, + bool immediate_update); void (*set_dynamic_metadata)(struct stream_encoder *enc, bool enable, @@ -242,4 +245,86 @@ struct stream_encoder_funcs { struct stream_encoder *enc); }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct hpo_dp_stream_encoder_state { + uint32_t stream_enc_enabled; + uint32_t vid_stream_enabled; + uint32_t otg_inst; + uint32_t pixel_encoding; + uint32_t component_depth; + uint32_t compressed_format; + uint32_t sdp_enabled; + uint32_t mapped_to_link_enc; +}; + +struct hpo_dp_stream_encoder { + const struct hpo_dp_stream_encoder_funcs *funcs; + struct dc_context *ctx; + struct dc_bios *bp; + uint32_t inst; + enum engine_id id; + struct vpg *vpg; + struct apg *apg; +}; + +struct hpo_dp_stream_encoder_funcs { + void (*enable_stream)( + struct hpo_dp_stream_encoder *enc); + + void (*dp_unblank)( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_source); + + void (*dp_blank)( + struct hpo_dp_stream_encoder *enc); + + void (*disable)( + struct hpo_dp_stream_encoder *enc); + + void (*set_stream_attribute)( + struct hpo_dp_stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, + bool compressed_format, + bool double_buffer_en); + + void (*update_dp_info_packets)( + struct hpo_dp_stream_encoder *enc, + const struct encoder_info_frame *info_frame); + + void (*stop_dp_info_packets)( + struct hpo_dp_stream_encoder *enc); + + void (*dp_set_dsc_pps_info_packet)( + struct hpo_dp_stream_encoder *enc, + bool enable, + uint8_t *dsc_packed_pps, + bool immediate_update); + + void (*map_stream_to_link)( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_enc_inst, + uint32_t link_enc_inst); + + void (*audio_mute_control)( + struct hpo_dp_stream_encoder *enc, bool mute); + + void (*dp_audio_setup)( + struct hpo_dp_stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info); + + void (*dp_audio_enable)( + struct hpo_dp_stream_encoder *enc); + + void (*dp_audio_disable)( + struct hpo_dp_stream_encoder *enc); + + void (*read_state)( + struct hpo_dp_stream_encoder *enc, + struct hpo_dp_stream_encoder_state *state); +}; +#endif + #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 03f47f23fb65..7390baf916b5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -100,6 +100,9 @@ enum crc_selection { enum otg_out_mux_dest { OUT_MUX_DIO = 0, +#if defined(CONFIG_DRM_AMD_DC_DCN) + OUT_MUX_HPO_DP = 2, +#endif }; enum h_timing_div_mode { diff --git 
a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index ad5f2adcc40d..d50f4bd06b5d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -236,7 +236,7 @@ struct hw_sequencer_funcs { const struct tg_color *solid_color, int width, int height, int offset); - void (*z10_restore)(struct dc *dc); + void (*z10_restore)(const struct dc *dc); void (*z10_save_init)(struct dc *dc); void (*update_visual_confirm_color)(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index f7f7e4fff0c2..d09eed7bcc4a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -41,6 +41,9 @@ struct dce_hwseq_wa { bool DEGVIDCN10_254; bool DEGVIDCN21; bool disallow_self_refresh_during_multi_plane_transition; +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool dp_hpo_and_otg_sequence; +#endif }; struct hwseq_wa_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 883dd8733ea4..83b2199b2c83 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -70,22 +70,36 @@ void link_enc_cfg_link_enc_unassign( * endpoint. */ bool link_enc_cfg_is_transmitter_mappable( - struct dc_state *state, + struct dc *dc, struct link_encoder *link_enc); +/* Return stream using DIG link encoder resource. NULL if unused. */ +struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc( + struct dc *dc, + enum engine_id eng_id); + /* Return link using DIG link encoder resource. NULL if unused. */ struct dc_link *link_enc_cfg_get_link_using_link_enc( - struct dc_state *state, + struct dc *dc, enum engine_id eng_id); /* Return DIG link encoder used by link. NULL if unused. */ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( - struct dc_state *state, + struct dc *dc, const struct dc_link *link); /* Return next available DIG link encoder. NULL if none available. */ -struct link_encoder *link_enc_cfg_get_next_avail_link_enc( - const struct dc *dc, - const struct dc_state *state); +struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc); + +/* Return DIG link encoder used by stream. NULL if unused. */ +struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( + struct dc *dc, + const struct dc_stream_state *stream); + +/* Return true if encoder available to use. */ +bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id); + +/* Returns true if encoder assignments in supplied state pass validity checks. 
*/ +bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state); #endif /* DC_INC_LINK_ENC_CFG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index fc1d289bb9fe..ba664bc49595 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -37,6 +37,7 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings); void dp_receiver_power_ctrl(struct dc_link *link, bool on); +void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); void edp_add_delay_for_T9(struct dc_link *link); bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index fe1e5833c96a..3fbda9d7e257 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -49,6 +49,10 @@ struct resource_caps { int num_vmid; int num_dsc; unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output). +#if defined(CONFIG_DRM_AMD_DC_DCN) + int num_hpo_dp_stream_encoder; + int num_hpo_dp_link_encoder; +#endif int num_mpc_3dlut; }; @@ -68,6 +72,15 @@ struct resource_create_funcs { struct stream_encoder *(*create_stream_encoder)( enum engine_id eng_id, struct dc_context *ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)( + enum engine_id eng_id, struct dc_context *ctx); + + struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)( + uint8_t inst, + struct dc_context *ctx); +#endif + struct dce_hwseq *(*create_hwseq)( struct dc_context *ctx); }; @@ -187,4 +200,9 @@ int get_num_mpc_splits(struct pipe_ctx *pipe); int get_num_odm_splits(struct pipe_ctx *pipe); +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( + const struct resource_pool *pool); +#endif + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index c4b067d01895..49d87fe5c167 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -132,6 +132,31 @@ enum dc_irq_source to_dal_irq_source_dcn20( } } +uint32_t dal_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source) +{ + const struct irq_source_info *info; + uint32_t addr; + uint32_t value; + uint32_t current_status; + + info = find_irq_source_info(irq_service, source); + if (!info) + return 0; + + addr = info->status_reg; + if (!addr) + return 0; + + value = dm_read_reg(irq_service->ctx, addr); + current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE); + + return current_status; +} + static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h index aee4b37999f1..f60a203e7188 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h @@ -31,4 +31,6 @@ struct irq_service *dal_irq_service_dcn20_create( struct irq_service_init_data *init_data); +uint32_t dal_get_hpd_state_dcn20(struct irq_service 
*irq_service, enum dc_irq_source source); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index ed54e1c819be..685528734575 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -135,6 +135,31 @@ enum dc_irq_source to_dal_irq_source_dcn21( return DC_IRQ_SOURCE_INVALID; } +uint32_t dal_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source) +{ + const struct irq_source_info *info; + uint32_t addr; + uint32_t value; + uint32_t current_status; + + info = find_irq_source_info(irq_service, source); + if (!info) + return 0; + + addr = info->status_reg; + if (!addr) + return 0; + + value = dm_read_reg(irq_service->ctx, addr); + current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE); + + return current_status; +} + static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h index da2bd0e93d7a..3df2ceeb2b70 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h @@ -31,4 +31,6 @@ struct irq_service *dal_irq_service_dcn21_create( struct irq_service_init_data *init_data); +uint32_t dal_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index a2a4fbeb83f8..4db1133e4466 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service) *irq_service = NULL; } -static const struct irq_source_info *find_irq_source_info( +const struct irq_source_info *find_irq_source_info( struct irq_service *irq_service, enum dc_irq_source source) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index dbfcb096eedd..e60b82480093 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -69,6 +69,10 @@ struct irq_service { const struct irq_service_funcs *funcs; }; +const struct irq_source_info *find_irq_source_info( + struct irq_service *irq_service, + enum dc_irq_source source); + void dal_irq_service_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data); diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index 1053b165c139..1e39aae6b1cf 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -69,9 +69,11 @@ static void virtual_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc) {} static void virtual_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc) {} static void virtual_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) {} @@ -102,7 +104,8 @@ static void virtual_setup_stereo_sync( static void virtual_stream_encoder_set_dsc_pps_info_packet( struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps) 
+ uint8_t *dsc_packed_pps, + bool immediate_update) {} static const struct stream_encoder_funcs virtual_str_enc_funcs = { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index caf961bb633f..ef324fc39315 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -358,6 +358,8 @@ struct dmub_srv_hw_funcs { uint32_t (*get_current_time)(struct dmub_srv *dmub); void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca); + + bool (*should_detect)(struct dmub_srv *dmub); }; /** @@ -724,6 +726,8 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +bool dmub_srv_should_detect(struct dmub_srv *dmub); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7efe9ba8706e..13a723395de2 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc +#define DMUB_FW_VERSION_GIT_HASH 0x607c9623 #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 79 +#define DMUB_FW_VERSION_REVISION 84 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -371,7 +371,8 @@ union dmub_fw_boot_options { uint32_t reserved2: 1; /**< reserved for an unreleased feature */ uint32_t reserved_unreleased1: 1; /**< reserved for an unreleased feature */ uint32_t invalid_vbios_data: 1; /**< 1 if VBIOS data table is invalid */ - uint32_t reserved : 23; /**< reserved */ + uint32_t reserved_unreleased2: 1; /**< reserved for an unreleased feature */ + uint32_t reserved : 22; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -973,7 +974,7 @@ struct dmub_dig_transmitter_control_data_v1_7 { uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */ uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */ uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */ - uint8_t reserved0; /**< For future use */ + uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */ uint8_t reserved1; /**< For future use */ uint8_t reserved2[3]; /**< For future use */ uint32_t reserved3[11]; /**< For future use */ @@ -2484,14 +2485,16 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) static inline bool dmub_rb_push_front(struct dmub_rb *rb, const union dmub_rb_cmd *cmd) { - uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt; - const uint8_t *src = (const uint8_t *)cmd; + uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t); + const uint64_t *src = (const uint64_t *)cmd; + uint8_t i; if (dmub_rb_full(rb)) return false; // copying data - dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE); + for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + *dst++ = *src++; rb->wrpt += DMUB_RB_CMD_SIZE; @@ -2600,14 +2603,16 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb, static inline bool dmub_rb_out_front(struct dmub_rb *rb, union dmub_rb_out_cmd *cmd) { - const uint8_t *src = (const uint8_t *)(rb->base_address) + rb->rptr; - uint8_t *dst = (uint8_t *)cmd; + const uint64_t volatile *src = (const 
uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t); + uint64_t *dst = (uint64_t *)cmd; + uint8_t i; if (dmub_rb_empty(rb)) return false; // copying data - dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE); + for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + *dst++ = *src++; return true; } @@ -2642,14 +2647,16 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb) */ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) { - uint8_t buf[DMUB_RB_CMD_SIZE]; uint32_t rptr = rb->rptr; uint32_t wptr = rb->wrpt; while (rptr != wptr) { - const uint8_t *data = (const uint8_t *)rb->base_address + rptr; + uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t); + uint64_t temp; + uint8_t i; - dmub_memcpy(buf, data, DMUB_RB_CMD_SIZE); + for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + temp = *data++; rptr += DMUB_RB_CMD_SIZE; if (rptr >= rb->capacity) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index fc667cb17eb0..6ac370c15dea 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -432,3 +432,11 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; } + +bool dmub_dcn31_should_detect(struct dmub_srv *dmub) +{ + uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0); + bool should_detect = fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED; + return should_detect; +} + diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index bb62605d2ac8..59ddc81b5a0e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -245,4 +245,6 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub); void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +bool dmub_dcn31_should_detect(struct dmub_srv *dmub); + #endif /* _DMUB_DCN31_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 75a91cfaf036..a6188d067d65 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -234,7 +234,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr; funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data; - + funcs->should_detect = dmub_dcn31_should_detect; funcs->get_current_time = dmub_dcn31_get_current_time; break; @@ -816,3 +816,11 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_ dmub->hw_funcs.get_diagnostic_data(dmub, diag_data); return true; } + +bool dmub_srv_should_detect(struct dmub_srv *dmub) +{ + if (!dmub->hw_init || !dmub->hw_funcs.should_detect) + return false; + + return dmub->hw_funcs.should_detect(dmub); +} diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index 76a87b682883..b8ffb216ebc4 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -152,6 +152,10 @@ struct bp_transmitter_control { enum signal_type signal; enum
dc_color_depth color_depth; /* not used for DCE6.0 */ enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + enum tx_ffe_id txffe_sel; /* used for DCN3 */ + enum engine_id hpo_engine_id; /* used for DCN3 */ +#endif struct graphics_object_id connector_obj_id; /* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should * be pixel clock * deep_color_ratio (in KHz) @@ -319,6 +323,10 @@ struct bp_encoder_cap_info { uint32_t DP_HBR2_EN:1; uint32_t DP_HBR3_EN:1; uint32_t HDMI_6GB_EN:1; + uint32_t IS_DP2_CAPABLE:1; + uint32_t DP_UHBR10_EN:1; + uint32_t DP_UHBR13_5_EN:1; + uint32_t DP_UHBR20_EN:1; uint32_t DP_IS_USB_C:1; uint32_t RESERVED:27; }; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 381c17caace1..3d2f0817e40a 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -227,7 +227,7 @@ enum { #define FAMILY_YELLOW_CARP 146 #define YELLOW_CARP_A0 0x01 -#define YELLOW_CARP_B0 0x02 // TODO: DCN31 - update with correct B0 ID +#define YELLOW_CARP_B0 0x20 #define YELLOW_CARP_UNKNOWN 0xFF #ifndef ASICREV_IS_YELLOW_CARP diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index aec7389aff37..ffd0df1701e6 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -80,6 +80,15 @@ enum dpcd_phy_test_patterns { PHY_TEST_PATTERN_CP2520_1, PHY_TEST_PATTERN_CP2520_2, PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */ + PHY_TEST_PATTERN_128b_132b_TPS1 = 0x8, + PHY_TEST_PATTERN_128b_132b_TPS2 = 0x10, + PHY_TEST_PATTERN_PRBS9 = 0x18, + PHY_TEST_PATTERN_PRBS11 = 0x20, + PHY_TEST_PATTERN_PRBS15 = 0x28, + PHY_TEST_PATTERN_PRBS23 = 0x30, + PHY_TEST_PATTERN_PRBS31 = 0x38, + PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40, + PHY_TEST_PATTERN_SQUARE_PULSE = 0x48, }; enum dpcd_test_dyn_range { @@ -135,7 +144,14 @@ enum dpcd_training_patterns { DPCD_TRAINING_PATTERN_1, DPCD_TRAINING_PATTERN_2, DPCD_TRAINING_PATTERN_3, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DPCD_TRAINING_PATTERN_4 = 7, + DPCD_128b_132b_TPS1 = 1, + DPCD_128b_132b_TPS2 = 2, + DPCD_128b_132b_TPS2_CDS = 3, +#else DPCD_TRAINING_PATTERN_4 = 7 +#endif }; /* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus @@ -149,6 +165,7 @@ enum dpcd_psr_sink_states { PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7, }; +#define DP_SOURCE_SEQUENCE 0x30c #define DP_SOURCE_TABLE_REVISION 0x310 #define DP_SOURCE_PAYLOAD_SIZE 0x311 #define DP_SOURCE_SINK_CAP 0x317 diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h index 58bb42ed85ca..84b299ff500a 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h @@ -140,6 +140,18 @@ enum sync_source { SYNC_SOURCE_DUAL_GPU_PIN }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum tx_ffe_id { + TX_FFE0 = 0, + TX_FFE1, + TX_FFE2, + TX_FFE3, + TX_FFE_DeEmphasis_Only, + TX_FFE_PreShoot_Only, + TX_FFE_No_FFE, +}; +#endif + /* connector sizes in millimeters - from BiosParserTypes.hpp */ #define CONNECTOR_SIZE_DVI 40 #define CONNECTOR_SIZE_VGA 32 diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 33b3d755fe65..01775417cf4b 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ 
b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -184,6 +184,14 @@ enum engine_id { ENGINE_ID_DACA, ENGINE_ID_DACB, ENGINE_ID_VCE, /* wireless display pseudo-encoder */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + ENGINE_ID_HPO_0, + ENGINE_ID_HPO_1, + ENGINE_ID_HPO_DP_0, + ENGINE_ID_HPO_DP_1, + ENGINE_ID_HPO_DP_2, + ENGINE_ID_HPO_DP_3, +#endif ENGINE_ID_VIRTUAL, ENGINE_ID_COUNT, diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 32f5274ed34e..9ffea7b40545 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -53,7 +53,11 @@ enum edp_revision { }; enum { - LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/ + LINK_RATE_REF_FREQ_IN_KHZ = 27000, /*27MHz*/ + BITS_PER_DP_BYTE = 10, + DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */ + DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */ + DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */ }; enum link_training_result { @@ -70,6 +74,12 @@ enum link_training_result { LINK_TRAINING_LINK_LOSS, /* Abort link training (because sink unplugged) */ LINK_TRAINING_ABORT, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_128b_132b_LT_FAILED, + DP_128b_132b_MAX_LOOP_COUNT_REACHED, + DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT, + DP_128b_132b_CDS_DONE_TIMEOUT, +#endif }; enum lttpr_mode { @@ -86,11 +96,23 @@ struct link_training_settings { enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; bool should_set_fec_ready; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - factor lane_settings out because it changes during LT */ + union dc_dp_ffe_preset *ffe_preset; +#endif uint16_t cr_pattern_time; uint16_t eq_pattern_time; + uint16_t cds_pattern_time; enum dc_dp_training_pattern pattern_for_cr; enum dc_dp_training_pattern pattern_for_eq; +#if defined(CONFIG_DRM_AMD_DC_DCN) + enum dc_dp_training_pattern pattern_for_cds; + + uint32_t eq_wait_time_limit; + uint8_t eq_loop_count_limit; + uint32_t cds_wait_time_limit; +#endif bool enhanced_framing; bool allow_invalid_msa_timing_param; @@ -114,13 +136,30 @@ enum dp_test_pattern { DP_TEST_PATTERN_CP2520_2, DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2, DP_TEST_PATTERN_CP2520_3, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_TEST_PATTERN_128b_132b_TPS1, + DP_TEST_PATTERN_128b_132b_TPS2, + DP_TEST_PATTERN_PRBS9, + DP_TEST_PATTERN_PRBS11, + DP_TEST_PATTERN_PRBS15, + DP_TEST_PATTERN_PRBS23, + DP_TEST_PATTERN_PRBS31, + DP_TEST_PATTERN_264BIT_CUSTOM, + DP_TEST_PATTERN_SQUARE_PULSE, +#endif /* Link Training Patterns */ DP_TEST_PATTERN_TRAINING_PATTERN1, DP_TEST_PATTERN_TRAINING_PATTERN2, DP_TEST_PATTERN_TRAINING_PATTERN3, DP_TEST_PATTERN_TRAINING_PATTERN4, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE, + DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE, + DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE, +#else DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4, +#endif /* link test patterns*/ DP_TEST_PATTERN_COLOR_SQUARES, @@ -152,6 +191,22 @@ enum dp_panel_mode { DP_PANEL_MODE_SPECIAL }; +enum dpcd_source_sequence { + DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG = 1, /*done in apply_single_controller_ctx_to_hw */ + DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR, /*done in core_link_enable_stream */ + DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME, /*done in 
core_link_enable_stream/dcn20_enable_stream */ + DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE, /*done in perform_link_training_with_retries/dcn20_enable_stream */ + DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY, /*done in dp_enable_link_phy */ + DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN, /*done in dp_set_hw_test_pattern */ + DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM, /*done in dce110_enable_audio_stream */ + DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_unblank */ + DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_blank */ + DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET, /*done in enc1_stream_encoder_dp_blank */ + DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM, /*done in dce110_disable_audio_stream */ + DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY, /*done in dp_disable_link_phy */ + DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE, /*done in dce110_disable_stream */ +}; + /* DPCD_ADDR_TRAINING_LANEx_SET registers value */ union dpcd_training_lane_set { struct { diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 571fcf23cea9..370fad883e33 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -72,6 +72,9 @@ #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__) +#endif struct dal_logger; @@ -123,6 +126,9 @@ enum dc_log_type { LOG_MAX_HW_POINTS, LOG_ALL_TF_CHANNELS, LOG_SAMPLE_1DLUT, +#if defined(CONFIG_DRM_AMD_DC_DCN) + LOG_DP2, +#endif LOG_SECTION_TOTAL_COUNT }; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index ef742d95ef05..64a38f08f497 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -54,12 +54,17 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2]; * just multiply with 2^gamma which can be computed once, and save the result so we * recursively compute all the values. */ - /*sRGB 709 2.2 2.4 P3*/ -static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0}; -static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0}; -static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0}; -static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0}; -static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600}; + +/* + * Regamma coefficients are used for both regamma and degamma. Degamma + * coefficients are calculated in our formula using the regamma coefficients. 
+ */ /*sRGB 709 2.2 2.4 P3*/ +static const int32_t numerator01[] = { 31308, 180000, 0, 0, 0}; +static const int32_t numerator02[] = { 12920, 4500, 0, 0, 0}; +static const int32_t numerator03[] = { 55, 99, 0, 0, 0}; +static const int32_t numerator04[] = { 55, 99, 0, 0, 0}; +static const int32_t numerator05[] = { 2400, 2200, 2200, 2400, 2600}; /* one-time setup of X points */ void setup_x_points_distribution(void) @@ -288,7 +293,8 @@ struct dividers { }; -static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_transfer_func_predefined type) +static bool build_coefficients(struct gamma_coefficients *coefficients, + enum dc_transfer_func_predefined type) { uint32_t i = 0; @@ -312,15 +318,15 @@ static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_ do { coefficients->a0[i] = dc_fixpt_from_fraction( - gamma_numerator01[index], 10000000); + numerator01[index], 10000000); coefficients->a1[i] = dc_fixpt_from_fraction( - gamma_numerator02[index], 1000); + numerator02[index], 1000); coefficients->a2[i] = dc_fixpt_from_fraction( - gamma_numerator03[index], 1000); + numerator03[index], 1000); coefficients->a3[i] = dc_fixpt_from_fraction( - gamma_numerator04[index], 1000); + numerator04[index], 1000); coefficients->user_gamma[i] = dc_fixpt_from_fraction( - gamma_numerator05[index], 1000); + numerator05[index], 1000); ++i; } while (i != ARRAY_SIZE(coefficients->a0)); @@ -1685,7 +1691,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma struct pwl_float_data_ex *rgb = rgb_regamma; const struct hw_x_point *coord_x = coordinates_x; build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); i = 0; while (i != hw_points_num + 1) { diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h index 92caf8441d1e..01a56556cde1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h @@ -11932,5 +11932,32 @@ #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2 0xe0c7 +#define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2 0xe0c8 +//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L + +//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L + +//[Note] Hack. RDPCSPIPE only has 2 instances.
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6 0x2d73 +#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6 0x2e4b +#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6 0x2d73 +#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6 0x2e4b +#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6 0x2d73 +#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 #endif diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 44955458fe38..7bd763361d6e 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -768,6 +768,10 @@ enum atom_encoder_caps_def ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not. ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board. + ATOM_ENCODER_CAP_RECORD_DP2 =0x10, // DP2 is supported by ASIC/board. + ATOM_ENCODER_CAP_RECORD_UHBR10_EN =0x20, // DP2.0 UHBR10 settings is supported by board + ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN =0x40, // DP2.0 UHBR13.5 settings is supported by board + ATOM_ENCODER_CAP_RECORD_UHBR20_EN =0x80, // DP2.0 UHBR20 settings is supported by board ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type. }; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 249cb0aeb5ae..c255b4b8e685 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2087,10 +2087,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (asic_type < CHIP_VEGA12) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { - if (!(asic_type == CHIP_VANGOGH)) + if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { - if (!(asic_type == CHIP_VANGOGH)) + if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index bdbbeb959c68..81f82aa05ec2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); + ni_update_current_ps(adev, boot_ps); + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index b1ad451af06b..ae11f5ed7a97 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -86,21 +86,21 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), - MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), - MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), - 
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), - MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0), MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0), MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), - MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), - MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index f0cfb58da467..ac006bed4743 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -390,8 +390,7 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev) { struct ci_power_info *pi = ci_get_pi(rdev); - u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; - u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; + u16 hi_sidd, lo_sidd; struct radeon_cac_tdp_table *cac_tdp_table = rdev->pm.dpm.dyn_state.cac_tdp_table; diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 35b77c944701..9d2bcb9551e6 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -820,12 +820,12 @@ union fan_info { static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) { - u32 size = atom_table->ucNumEntries * - sizeof(struct radeon_clock_voltage_dependency_entry); int i; ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; - radeon_table->entries = kzalloc(size, GFP_KERNEL); + radeon_table->entries = kcalloc(atom_table->ucNumEntries, + sizeof(struct radeon_clock_voltage_dependency_entry), + GFP_KERNEL); if (!radeon_table->entries) return -ENOMEM; @@ -1361,7 +1361,9 @@ u16 r600_get_pcie_lane_support(struct radeon_device *rdev, u8 r600_encode_pci_lane_width(u32 lanes) { - u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; + static const u8 encoded_lanes[] = { + 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 + }; if (lanes > 16) return 0;
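
Editor's note on the HPO DP additions above: the hpo_dp_link_encoder_funcs table introduced in link_encoder.h is a plain ops vtable, so DC code reaches the DCN3.x HPO (High Pixel-rate Output) hardware only through enc->funcs. The following is a minimal sketch of a caller; the helper name example_enable_hpo_dp_link() and its ordering are assumptions for illustration, only the two ops and their signatures come from the header.

/* Illustrative sketch only: bring up a 128b/132b link via the HPO vtable. */
static void example_enable_hpo_dp_link(struct hpo_dp_link_encoder *enc,
		const struct dc_link_settings *link_settings,
		enum transmitter transmitter)
{
	/* Power up the PHY for the requested rate/lane configuration. */
	enc->funcs->enable_link_phy(enc, link_settings, transmitter);

	/* Enable the link layer for the negotiated lane count. */
	enc->funcs->link_enable(enc, link_settings->lane_count);
}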
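Editor's note on the dmub_cmd.h ring-buffer hunks above: they replace byte-wise dmub_memcpy() with explicit loops over volatile uint64_t pointers, so every access to the DMCUB-visible ring is a single aligned 8-byte load or store that the compiler cannot split, merge, or elide (DMUB_RB_CMD_SIZE is a multiple of 8). A standalone userspace analogue of the push path, with assumed names and a 64-byte command size:

#include <stdbool.h>
#include <stdint.h>

#define RB_CMD_SIZE 64 /* stand-in for DMUB_RB_CMD_SIZE */

struct example_rb {
	void *base_address; /* 8-byte-aligned ring memory */
	uint32_t wrpt;      /* write pointer, in bytes */
	uint32_t capacity;  /* in bytes, a multiple of RB_CMD_SIZE */
};

/* Copy one command into the ring as eight aligned qword stores. */
static bool example_rb_push(struct example_rb *rb, const void *cmd)
{
	volatile uint64_t *dst =
		(volatile uint64_t *)rb->base_address + rb->wrpt / sizeof(uint64_t);
	const uint64_t *src = cmd;
	uint8_t i;

	for (i = 0; i < RB_CMD_SIZE / sizeof(uint64_t); i++)
		*dst++ = *src++; /* volatile: each 8-byte store is emitted as-is */

	rb->wrpt += RB_CMD_SIZE;
	if (rb->wrpt >= rb->capacity)
		rb->wrpt = 0; /* wrap; the real helpers also reject a full ring */
	return true;
}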
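Editor's note on the DATA_EFFICIENCY_* constants added to link_service_types.h above: they make the channel-coding overhead explicit, with 8b/10b carrying 80% payload (and 97% of that once FEC is enabled) while 128b/132b carries about 96.46% after folding in FEC and the 0.25% downspread. A self-contained back-of-the-envelope comparison using those constants and the public raw per-lane rates for HBR3 (8.1 Gbps) and UHBR10 (10 Gbps); the program is illustrative, not dc's bandwidth helper:

#include <stdint.h>
#include <stdio.h>

#define DATA_EFFICIENCY_8b_10b_x10000 8000
#define DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 97
#define DATA_EFFICIENCY_128b_132b_x10000 9646

int main(void)
{
	/* Raw 4-lane link rates in kbps. */
	uint64_t hbr3_raw = 8100000ULL * 4;
	uint64_t uhbr10_raw = 10000000ULL * 4;

	/* 8b/10b: 80% efficiency, then 97% once FEC is enabled. */
	uint64_t hbr3_payload = hbr3_raw * DATA_EFFICIENCY_8b_10b_x10000 / 10000
		* DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 / 100;

	/* 128b/132b folds FEC and downspread into one factor. */
	uint64_t uhbr10_payload = uhbr10_raw * DATA_EFFICIENCY_128b_132b_x10000 / 10000;

	/* Prints 25142400 vs 38584000 kbps: ~25.1 vs ~38.6 Gbps of payload. */
	printf("HBR3 x4: %llu kbps, UHBR10 x4: %llu kbps\n",
	       (unsigned long long)hbr3_payload,
	       (unsigned long long)uhbr10_payload);
	return 0;
}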